Example #1
def run_evaluation_boundary_predictions(network_name):
    pathPrefix = './AC4_small/'
    img_gt_search_string = pathPrefix + 'labels/*.tif'
    img_pred_search_string = pathPrefix + 'boundaryProbabilities/'+network_name+'/*.tif'

    img_files_gt = sorted( glob.glob( img_gt_search_string ) )
    img_files_pred = sorted( glob.glob( img_pred_search_string ) )

    allVI = []
    allVI_split = []
    allVI_merge = []

    allRand = []
    allRand_split = []
    allRand_merge = []

    for i in xrange(np.shape(img_files_pred)[0]):
        print img_files_pred[i]
        im_gt = mahotas.imread(img_files_gt[i])
        im_pred = mahotas.imread(img_files_pred[i])
        im_pred = im_pred / 255.0

        VI_score = []
        VI_score_split = []
        VI_score_merge = []

        Rand_score = []
        Rand_score_split = []
        Rand_score_merge = []
    
        start_time = time.clock()

        for thresh in np.arange(0,1,0.05):
            # white regions, black boundaries
            im_seg = im_pred>thresh
            # connected components
            seeds, nr_regions = mahotas.label(im_seg)
            
            result = segmentation_metrics(im_gt, seeds, seq=False)   
            
            VI_score.append(result['VI']['F-score'])
            VI_score_split.append(result['VI']['split'])
            VI_score_merge.append(result['VI']['merge'])

            Rand_score.append(result['Rand']['F-score'])
            Rand_score_split.append(result['Rand']['split'])
            Rand_score_merge.append(result['Rand']['merge'])

        print "This took in seconds: ", time.clock() - start_time

        allVI.append(VI_score)
        allVI_split.append(VI_score_split)
        allVI_merge.append(VI_score_merge)

        allRand.append(Rand_score)
        allRand_split.append(Rand_score_split)
        allRand_merge.append(Rand_score_merge)
        
    with open(pathPrefix+network_name+'.pkl', 'wb') as file:
        cPickle.dump((allVI, allVI_split, allVI_merge, allRand, allRand_split, allRand_merge), file)
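A minimal usage sketch, assuming a trained network whose boundary maps already sit under ./AC4_small/boundaryProbabilities/ (the name 'unet_v1' is made up):

run_evaluation_boundary_predictions('unet_v1')
# sweeps 20 thresholds per slice and writes ./AC4_small/unet_v1.pkl
# with the VI and Rand F-score/split/merge curves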
Example #2
def file_reader(filename, **kwds):
    '''Read data from any format supported by PIL, or by freeimage if mahotas
    is installed.

    Note that 16-bit TIFF files can only be read when mahotas and freeimage
    are installed.
    
    Parameters
    ----------
    filename: str
        By using '*' it is possible to load a collection of images of the same 
        size into a three dimensional dataset.
    '''
    if '*' in filename:
        from glob import glob
        flist=glob(filename)
        flist.sort()
        imsample = imread(flist[0])
        w=imsample.shape[0]
        h=imsample.shape[1]
        d=len(flist)
        dc=np.zeros((w,h,d))
        for i in xrange(d):
            dc[:,:,i] = imread(flist[i])
    else:
        dc = imread(filename)
    dt = 'image'    
    return [{'data':dc, 
             'mapped_parameters': {
                'name': filename,
                'original_filename' : filename,
                'record_by': dt,
                'signal' : None,
                }
             }]
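A short usage sketch for the wildcard branch, assuming a folder of equally sized TIFF slices (the path is made up):

# Loads every matching file into one (height, width, n_files) array
signal = file_reader('slices/*.tif')[0]
volume = signal['data']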
def load_image(f):
    'Load the triple (PROTEIN, DNA, ROIS)'
    import mahotas as mh
    f = DATADIR+f
    return mh.imread(f.replace('dna', 'protein')), \
            mh.imread(f), \
            mh.imread(f.replace('dna', 'rois'))
def compare1(f):
    rois = mh.imread(f.replace('rois2', 'rois'))
    rois2 = mh.imread(f)
    rarea = (rois != 0).ravel()
    rarea2 = (rois2 != 0).ravel()
    return (rarea.mean(),
            rarea2.mean(),
            np.corrcoef(rarea, rarea2)[0,1])
def readImages(imgCount, inputpath, labelpath):
    """
    reads all the images in the stored file location, as well as generates random pixel coordinates to create patches from
    :param imgCount: the number of images used for training the classifier
    :param filepath: the file path to the images
    """
    input=[]
    labels=[]
    for i in range(imgCount):
        input.append((mahotas.imread(inputpath+str(i)+".tif")))
        labels.append((mahotas.imread(labelpath+str(i)+".tif")))
    return [input, labels]
def compare_arand(f):
    from sklearn import metrics
    from scipy.spatial import distance
    rois = mh.imread(f.replace('rois2', 'rois'))
    rois2 = mh.imread(f)
    rois = (rois.ravel() != 0)
    rois2 = (rois2.ravel() != 0)
    arand = metrics.adjusted_rand_score(rois, rois2)
    # Note that scipy returns the Jaccard distance, which is 1 - Jaccard index.
    # sklearn does not really implement the Jaccard index; its "jaccard" score
    # is effectively just a synonym for accuracy.

    jaccard = 1. - distance.jaccard(rois, rois2)
    mcc = metrics.matthews_corrcoef(rois, rois2)
    return arand, jaccard, mcc
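A toy check of the Jaccard relation stated in the comment above, with made-up boolean vectors:

import numpy as np
from scipy.spatial import distance
a = np.array([True, True, False, False])
b = np.array([True, False, True, False])
# intersection = 1, union = 3, so the Jaccard index is 1/3
assert abs((1.0 - distance.jaccard(a, b)) - 1.0 / 3) < 1e-12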
    def open_image_and_gold(image_index, crop_from, crop_size):
        path = image_path_format_string.format(image_index)
        gold_path = gold_image_path_format_string.format(image_index)

        # Open raw image
        image = np.float32(normalize_image(mahotas.imread(path)[crop_from[0]:crop_from[0]+crop_size,crop_from[1]:crop_from[1]+crop_size]))

        # Open gold standard image
        gold_image = mahotas.imread(gold_path)[crop_from[0]:crop_from[0]+crop_size,crop_from[1]:crop_from[1]+crop_size]

        # Convert to ids
        if len(gold_image.shape) == 3:
            gold_image = (np.uint32(gold_image[:,:,0]) * 2**16 + np.uint32(gold_image[:,:,1]) * 2**8 + np.uint32(gold_image[:,:,2])).squeeze()

        return (image, gold_image)
def binarization(dirs):
    # Binarization feature:
    # if a pixel's intensity is greater than a threshold, assign 1, otherwise 0.
    # This feature is robust to lighting changes.

    features = []  # store local feature descriptors

    for idir in range(len(dirs)):
        files = [name for name in os.listdir(dirs[idir]) 
                 if os.path.isfile(os.path.join(dirs[idir], name))]

        for ifile in range(len(files)):
            if files[ifile][-3:] != 'jpg':  # ignore non-image files
                continue
            image = mh.imread(dirs[idir]+files[ifile]).astype(np.uint8) # read image       
            if (len(image.shape) == 3):  # convert to gray if colored
                image = mh.colors.rgb2gray(image, dtype=np.uint8)

            # Calculate the binarization threshold using otsu method
            threshold = mh.thresholding.otsu(image)
            binarized = image > threshold

            # Calculate the ratio of white pixels to total pixels
            nPix = float(image.shape[0]*image.shape[1])
            area_ratio = len(binarized[binarized == 1])/nPix

            features.append(area_ratio)

    features = np.array(features) 
    return features
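A hypothetical call; note that the function concatenates directory and file name directly, so each entry needs a trailing slash:

# Directory names are made up; returns one white-area ratio per .jpg found
feats = binarization(['photos/day/', 'photos/night/'])
print(feats.shape)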
Example #9
def load_data(augmented=True, image_dirs=image_dirs):
    X = []
    y = []

    suff = 'A' if augmented else ''

    for i in range(len(image_dirs)):
        for pth in (Path.cwd() / image_dirs[i] / ('data' + str(img_rows) + suff)).iterdir():
            im = mh.imread(str(pth), True)
            X.append(im)
            y.append(i)

    p = np.random.permutation(len(X))

    X = [X[i] for i in p]
    y = [y[i] for i in p]

    if train_split == 0:  # Use everything both to train and validate
        X_train = np.array(X, np.uint8)
        y_train = np.array(y, np.uint8)
        X_test = X_train[::]
        y_test = y_train[::]
    else:
        train_size = int(len(y) * train_split)
        X_train = np.array(X[:train_size], np.uint8)
        y_train = np.array(y[:train_size], np.uint8)
        X_test = np.array(X[train_size:], np.uint8)
        y_test = np.array(y[train_size:], np.uint8)

    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    return img_to_float(X_train, y_train, X_test, y_test)
Example #10
def executa_extracao_n(base_treino, metodo, n=1):
    inicio = time()    
    
    lista_imagens = arq.busca_arquivos(base_treino, "*.png")
    n_imgs_treino = len(lista_imagens)
    
    for lado in range(8,n+1,4):
        atributos = []    
        rotulos = []     
            
        arq_treino = base_treino + "base_PFTAS_"+str(lado)+"x"+str(lado)+".svm"
        ##  START OF THE FEATURE-EXTRACTION PROCESS
        
        for arq_imagem in lista_imagens: 
            print("Arquivo: " + arq_imagem)
            imagem = mh.imread(arq_imagem) 
            if imagem is not None:
                classe, _ = ex.classe_arquivo(arq_imagem)             
                print("executa_extracao_n - shape imagem:" + str(imagem.shape))
                # Extract the features and generate the patch files for the training set
                atrs,rots = extrai_pftas_patches_n(imagem, classe, lado)                            
                atributos += atrs
                rotulos += rots
        
        dump_svmlight_file(atributos, rotulos, arq_treino)
    
    log("Extraidos atributos da base " + base_treino + " utilizando " + metodo + "\n para " + str(n_imgs_treino) + "imagens") 
  
    # Display the execution time
    log(str(time()-inicio) + "EXTRAÇÃO")     
Example #11
def test_rgba():
    rgba = path.join(
                path.dirname(__file__),
                'data',
                'rgba.png')
    rgba = imread(rgba)
    assert np.all(np.diff(rgba[:,:,3].mean(1)) < 0 ) # the image contains an alpha gradient
def create_fullContour_labels():
    for purpose in ['train','validate','test']:
        img_search_string = '/media/vkaynig/Data1/Cmor_paper_data/labels/' + purpose + '/*.tif'
        img_files = sorted( glob.glob( img_search_string ) )
        
        for img_index in xrange(np.shape(img_files)[0]):
            print 'reading image ' + img_files[img_index] + '.'
            label = mahotas.imread(img_files[img_index])
            
            #membranes = np.logical_and(label[:,:,0]==0, label[:,:,1]==255)
            boundaries = label == -1
            boundaries[0:-1,:] = np.logical_or(boundaries[0:-1,:], np.diff(label, axis=0)!=0)
            boundaries[:,0:-1] = np.logical_or(boundaries[:,0:-1], np.diff(label, axis=1)!=0)

            membranes = np.logical_or(boundaries, label[:,:]==0) 

            shrink_radius=5
            y,x = np.ogrid[-shrink_radius:shrink_radius+1, -shrink_radius:shrink_radius+1]
            disc = x*x + y*y <= (shrink_radius ** 2)
            non_membrane = 1-mahotas.dilate(membranes, disc)

            img_file_name = os.path.basename(img_files[img_index])[:-4]+ '.tif'         
            outputPath = '/media/vkaynig/Data1/Cmor_paper_data/labels/'
            
            print 'writing image: ' + img_file_name
            mahotas.imsave(outputPath + 'background_fullContour/' + purpose + '/' + img_file_name, np.uint8(non_membrane*255))
            mahotas.imsave(outputPath + 'membranes_fullContour/' + purpose + '/' + img_file_name, np.uint8(membranes*255))
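A toy illustration of the np.diff boundary trick used above, on a made-up label array:

import numpy as np
lab = np.array([[1, 1, 2],
                [1, 1, 2]])
boundaries = lab == -1                            # all False to start
boundaries[0:-1, :] |= np.diff(lab, axis=0) != 0  # vertical id changes
boundaries[:, 0:-1] |= np.diff(lab, axis=1) != 0  # horizontal id changes
# boundaries now marks pixels whose right or lower neighbour has a different id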
def normalize_all_input(img_search_string = '/media/vkaynig/NewVolume/IAE_ISBI2012/images/images/*.tif'):
    img_files = sorted( glob.glob( img_search_string ) )
    for fileName in img_files:
        img = mahotas.imread(fileName)
        clahe.clahe(img, img, 2.0)
 #       img = normalizeImage(img, saturation_level=0.05)
        mahotas.imsave(fileName, np.uint8(img))
def load_stack(folder_name, ifrom=None, ito=None):

    stack = None
    input_files = sorted(glob.glob(os.path.join(folder_name, '*')))

    input_files = [i for i in input_files if not i.endswith('.db')]

    if ifrom is not None:
        if ito is not None:
            input_files = input_files[ifrom:ito]
        else:
            input_files = input_files[ifrom:ifrom+1]

    for i, file_name in enumerate(input_files):

        if file_name.endswith('h5') or file_name.endswith('hdf5'):
            infile = h5py.File(file_name, 'r')
            im = infile['/labels'][...]
        else:
            im = mahotas.imread(file_name)
            if len(im.shape) == 3 and im.shape[2] == 3:
                im = np.int32(im[ :, :, 0 ]) * 2**16 + np.int32(im[ :, :, 1 ]) * 2**8 + np.int32(im[ :, :, 2 ])
            elif len(im.shape) == 3 and im.shape[2] == 4:
                im = np.int32(im[ :, :, 0 ]) * 2**16 + np.int32(im[ :, :, 1 ]) * 2**8 + np.int32(im[ :, :, 2 ]) + np.int32(im[ :, :, 3 ]) * 2**24

        if stack is None:
            stack = np.zeros((len(input_files), im.shape[0], im.shape[1]), dtype=im.dtype)
            print 'Stack size={0}, dtype={1}.'.format(stack.shape, stack.dtype)
        stack[i,:,:] = im

    return stack
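A quick check of the RGB-to-id packing used above (the channel values are made up):

r, g, b = 1, 2, 3
assert r * 2**16 + g * 2**8 + b == 0x010203  # each channel occupies one byte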
Example #15
    def __init__(self, path, scale_depth, scale_from=0, step_coeff=3):
        """
        Parameters
        ----------
        path : str
            path to an image
        scale_depth : int
            how many levels of blur to create
        scale_from : int
            from which level to start (0 = no blur at all)
        step_coeff : float
            multiplier applied to the level index to obtain the blur sigma
        """
        self.x, self.y, self.z = (0, 0, 0)
        self.filename = os.path.basename(path)
        self.image = []
        orig_image = mahotas.imread(path, as_grey=True)
        self.width = np.size(orig_image, 1)
        self.height = np.size(orig_image, 0)
        
        if scale_from == 0:
            self.image.append(orig_image)
        else:
            self.image.append(None)
        for i in range(1, scale_depth + 1):
            if i < scale_from:
                self.image.append(None)
            else:
                sigma = (step_coeff * i, step_coeff * i)
                print("sigma: %s" % (sigma,))
                self.image.append(ndimage.gaussian_filter(orig_image, sigma))
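A hypothetical instantiation; the enclosing class name ScaleSpace is an assumption, since only __init__ is shown:

ss = ScaleSpace('cells.png', scale_depth=3, scale_from=1, step_coeff=3)
# ss.image == [None, blur(sigma=3), blur(sigma=6), blur(sigma=9)]
# level i is blurred with sigma = step_coeff * i along each axis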
Example #16
def exibe_cria_patches(diretorio, n_divs=3):
    lista_imagens = arq.busca_arquivos(diretorio, "*.png")
    # convert to greyscale
    for arquivo in lista_imagens:
        img = mh.imread(arquivo)            
        
        #patches = bp.patches(img, TAM_PATCH, rgb=True)
        img_cinza = cv2.imread(arquivo, cv2.IMREAD_GRAYSCALE)    
        #img_cinza = bp.limpa_imagem(img_cinza)                    
        #patches_cinza = bp.cria_patches(img_cinza, TAM_PATCH)        
        #patches_cinza = bp.patches(img_cinza, TAM_PATCH, rgb=False)        
        
        
        # display the original image
        plt.imshow(img)                
        #patches = cria_patches3(img, n_divs, rgb=True)
        patches = cria_patches3(img, 32, rgb=True)
        print("Total de patches: %f", len(patches))
        print("Tamanho do patch: %i", patches[0].shape)
                    
        #y = int(math.sqrt(4**n_divs))
        y = int(math.sqrt(len(patches)))
        x = y 
        
        fig,axes = plt.subplots(x,y) 
        for i, ax in enumerate(axes.ravel()): 
            if i == len(patches):
                break
            ax.xaxis.set_major_formatter(plt.NullFormatter())
            ax.yaxis.set_major_formatter(plt.NullFormatter())
            im = ax.imshow(patches[i],'gray')         
        plt.show()    
def test_determinant_zero():
    img = mahotas.imread(path.join(
        path.abspath(path.dirname(__file__)),
                    'data',
                    'determinant_zero.png'))
    points = surf(img, threshold=.0)
    assert type(points) == np.ndarray
def exibe_cria_patches(lista_imagens):
    # convert to greyscale
    for arquivo in lista_imagens:
        img = mh.imread(arquivo)            
        
        #patches = bp.patches(img, TAM_PATCH, rgb=True)
        img_cinza = cv2.imread(arquivo, cv2.IMREAD_GRAYSCALE)    
        #img_cinza = bp.limpa_imagem(img_cinza)                    
        #patches_cinza = bp.cria_patches(img_cinza, TAM_PATCH)        
        #patches_cinza = bp.patches(img_cinza, TAM_PATCH, rgb=False)        
        
        
        # display the original image
        plt.imshow(img)        
        patches = [img]
        for n_div in range(1,2):        
            patches = cria_patches(patches, 1, rgb=True)
            print("Total de patches: %f", len(patches))
            print("Tamanho do patch: %i", patches[0].shape)
                    
            y = int(math.sqrt(4**n_div))
            x = y 
            
            #fig,axes = plt.subplots(x,y, figsize=(32,32))          
            fig,axes = plt.subplots(x,y) 
            for i, ax in enumerate(axes.ravel()): 
                ax.xaxis.set_major_formatter(plt.NullFormatter())
                ax.yaxis.set_major_formatter(plt.NullFormatter())
                im = ax.imshow(patches[i],'gray')         
            plt.show()    
Example #19
def test_slic():
    f = mahotas.imread('mahotas/demos/data/luispedro.jpg')
    segmented, n = mahotas.segmentation.slic(f)
    assert segmented.shape == (f.shape[0], f.shape[1])
    assert segmented.max() == n
    segmented2, n2 = mahotas.segmentation.slic(f, 128)
    assert n2 < n
Example #20
def handle_gfx_slice(offset, data):
    """
    Determine the image type, as indicated by its magic numer, of the slice.
    Create a image file with correct extension.
    Display the image using pylab.
    """
    print("\n=== ", offset, " ===", sep='')
    filtered_data = []
    for i in xrange(offset, len(data), 5):
        filtered_data.append(data[i])

    magic = ''.join("{:02x}".format(ord(c)) for c in filtered_data[:8:])
    if magic[0:8] == 'ffd8ffe0':
        ext = 'jpg'
    elif magic[0:16] == '89504e470d0a1a0a':
        ext = 'png'
    elif magic[0:12] == '474946383761' or magic[0:12] == '474946383961':
        ext = 'gif'
    else:
        ext = 'fail'
    print('magic:', magic)
    print('ext:', ext)

    # fixme: Factor this into a function
    with open('%s.%s' % (offset, ext), 'wb') as out_file:
        for item in filtered_data:
            out_file.write(item)

    img = mh.imread("%s.%s" % (offset, ext))
    pylab.imshow(img)
    pylab.show()
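The magic-number check in isolation, using the real 8-byte PNG signature as toy data:

data = '\x89PNG\r\n\x1a\n'
magic = ''.join("{:02x}".format(ord(c)) for c in data)
assert magic == '89504e470d0a1a0a'  # -> ext = 'png' above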
def processAttributes_surf(filePattern):
	targets_data = []
	surf_features = []
	counter = 0
	for f in glob.glob(filePattern):
		counter+=1
		print 'Reading image: ', counter, f

		target = 1 if 'cat' in f else 0
		targets_data.append(target)
		
		image = mh.imread(f, as_grey=True)
		surf_features.append(surf.surf(image)[:, 5:])

	X_train_surf_features = np.concatenate(surf_features)
	
	# Clusters
	n_clusters = 300
	print 'Clustering', len(X_train_surf_features), 'features'
	estimator = MiniBatchKMeans(n_clusters=n_clusters)
	estimator.fit_transform(X_train_surf_features)

	x_data = []
	for instance in surf_features:
		clusters = estimator.predict(instance)
		features = np.bincount(clusters)
		if len(features) < n_clusters:
			features = np.append(features, np.zeros((1, n_clusters-len(features))))

		x_data.append(features)

	return x_data, targets_data
Example #22
def extrai_haralick(lista_imgs, usa_descarte=False):
    atributos = []    
    rotulos = []    
    ref = patch_referencia() 
    hist_ref = bp.histograma(bp.aplica_lbp(ref)) 
    descartados = [] 
    
    for arquivo in lista_imgs:        
        # recover the class from the file name
        classe, _ = classe_arquivo(arquivo) 
                
        # convert to greyscale
        img_cinza = mh.imread(arquivo, as_grey=True)            
        
        if (usa_descarte):
            img_cinza = limpa_imagem(img_cinza)        
        patches = bp.cria_patches(img_cinza, TAM_PATCH)
        
        # compute the histogram of each patch
        for patch in patches:
            if (usa_descarte): 
                lbp_patch = bp.aplica_lbp(patch)
                hist = bp.histograma(lbp_patch)                  
                if (patch_valido(hist, hist_ref)):
                    glcm = mh.features.haralick(np.asarray(patch, dtype=np.uint8))
                    glcm = np.asarray(glcm).flatten()           
                    atributos.append(glcm)  
                    rotulos.append(CLASSES[classe])  
            else:
                glcm = mh.features.haralick(np.asarray(patch, dtype=np.uint8))
                glcm = np.asarray(glcm).flatten()
                atributos.append(glcm)
                rotulos.append(CLASSES[classe])
        
    return (atributos,rotulos)
Example #23
def method2(image, sigma):
    image = mh.imread(image)[:, :, 0]
    image = mh.gaussian_filter(image, sigma)
    image = mh.stretch(image)
    binimage = image > mh.otsu(image)
    labeled, _ = mh.label(binimage)
    return labeled
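A minimal usage sketch (the file name is hypothetical):

# mh.label numbers objects 1..n, so the maximum label is the object count
labeled = method2('nuclei.png', sigma=4)
print('%d objects found' % labeled.max())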
Example #24
def load(image_name, as_grey=None):
    '''
    Loads a demo image

    Parameters
    ----------
    image_name : str
        Name of one of the demo images
    as_grey : bool, optional
        Whether to convert to greyscale

    Returns
    -------
    im : ndarray
        Image
    '''
    from os import path
    import mahotas as mh
    _demo_images  = {
        'wally' : 'DepartmentStore.jpg',
        'departmentstore' : 'DepartmentStore.jpg',
        'lenna' : 'lena.jpg',
        'lena' : 'lena.jpg',
        'luispedro' : 'luispedro.jpg',
        'nuclear' : 'nuclear.png',
    }
    if image_name.lower() not in _demo_images:
        raise KeyError('mahotas.demos.load: Unknown demo image "{}", known images are {}'.format(image_name, list(_demo_images.keys())))

    image_name = image_path(_demo_images[image_name.lower()])
    return mh.imread(image_name, as_grey=as_grey)
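A usage sketch; both keys come from the table above and are matched case-insensitively:

wally = load('Wally')               # the DepartmentStore.jpg image
lena = load('lena', as_grey=True)   # converted to greyscale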
Example #25
def load_stack(folder_name):

    stack = None
    input_files = sorted(glob.glob(os.path.join(folder_name, '*')))

    for i, file_name in enumerate(input_files):

        print file_name

        if file_name.endswith('h5') or file_name.endswith('hdf5'):
            infile = h5py.File(file_name, 'r')
            im = infile['/probabilities'][...]
        else:
            im = mahotas.imread(file_name)
            if len(im.shape) == 3:
                im = np.uint32(im[ :, :, 0 ]) + np.uint32(im[ :, :, 1 ]) * 2**8 + np.uint32(im[ :, :, 2 ]) * 2**16

        if im.shape[0] > 400:
            im = im[60:60+400, 210:210+400]

        if rot != 0:
            im = np.rot90(im, rot)

        if stack is None:
            stack = np.zeros((len(input_files), im.shape[0], im.shape[1]), dtype=im.dtype)
            print 'Stack size={0}, dtype={1}.'.format(stack.shape, stack.dtype)
        stack[i,:,:] = im

        #print file_name

    return stack
Example #26
def createMasterList():
	masterfile = open("metadata/master-data.txt",'w')
	mypath = "images/1fps"
	f = []
	for i in range(309):
		f.append('x'+str(i+1)+'.jpg')
	print "Reading Files..."

	for image in f:
		imgname = 'images/1fps/'+image

		img = mahotas.imread(imgname, as_grey=True) # input image
		#extract description of all points of interest
		spoints = surf.surf(img, nr_octaves=4, nr_scales=6, initial_step_size=1, threshold=0.1, max_points=30, descriptor_only=True) 
		
		info = "" #string for each image information

		newList = [sum(attr)/len(attr) for attr in zip(*spoints)]

		for i in range(len(newList)):
			info+=str(i+1)+":"+str(newList[i])+" "
		info+="\n"

		masterfile.write(info)

	masterfile.close()

	print "Completed master feature list..."
Example #27
def test_gaussian_filter():
    from scipy import ndimage
    f = mahotas.imread('mahotas/demos/data/luispedro.jpg', 1)
    for s in (4.,8.,12.):
        g = gaussian_filter(f, s)
        n = ndimage.gaussian_filter(f, s)
        assert np.max(np.abs(n - g)) < 1.e-5
def faces():
  from os import walk,path
  import numpy as np
  import mahotas as mh 
  from sklearn.cross_validation import train_test_split
  from sklearn.cross_validation import cross_val_score
  from sklearn.preprocessing import scale 
  from sklearn.decomposition import PCA 
  from sklearn.linear_model import LogisticRegression
  from sklearn.metrics import classification_report
  X = []
  y = []
  for dir_path,dir_names,file_names in walk('./data/att_faces'):
    for fn in file_names:
      if fn[-3:] == 'pgm':
        image_filename = path.join(dir_path,fn)
        X.append(scale(mh.imread(image_filename,as_grey=True).reshape(10304).astype('float32')))
        y.append(dir_path)
  X = np.array(X)
  X_train,X_test,y_train,y_test = train_test_split(X,y)
  pca = PCA(n_components = 150)
  X_train_reduced = pca.fit_transform(X_train)
  X_test_reduced = pca.transform(X_test)
  print 'original data were',X_train.shape
  print 'reduced is ',X_train_reduced.shape

  classifier = LogisticRegression()
  accuracies = cross_val_score(classifier,X_train_reduced,y_train)

  print 'cross val: ',np.mean(accuracies),accuracies
  classifier.fit(X_train_reduced,y_train)
  predictions = classifier.predict(X_test_reduced)
  print classification_report(y_test,predictions)
Example #29
def extractFeatV2(image_file):
    region_feat = extractFeatV1(image_file)
    
    image = imread(image_file, as_grey=True)
    image = image.copy()

    # global features
    idx = np.nonzero(255-image)
    nonzero = image[idx]
    global_feat = [ 
        np.mean(nonzero),
        np.std(nonzero),
        kurtosis(nonzero),
        skew(nonzero),
        gini(nonzero,image_file),
    ]
    global_feat = np.asarray(global_feat, dtype='float32' )

    # concat all the features
    image2 = mh.imread(image_file, as_grey=True)
    haralick = mh.features.haralick(image2, ignore_zeros=False, preserve_haralick_bug=False, compute_14th_feature=False)
    lbp = mh.features.lbp(image2, radius=20, points=7, ignore_zeros=False)
    pftas = mh.features.pftas(image2)
    zernike_moments = mh.features.zernike_moments(image2, radius=20, degree=8)
    #surf_feat = surf.surf(image2)
    haralick = np.reshape(haralick,(np.prod(haralick.shape)))
    #surf_feat = np.reshape(surf_feat,(np.prod(surf_feat.shape)))
    
    #mh_feat = np.hstack((haralick, lbp, pftas, zernike_moments, surf_feat))
    mh_feat = np.hstack((haralick, lbp, pftas, zernike_moments))

    feat = np.hstack((global_feat, region_feat, mh_feat))
    
    return feat
Example #30
def imread(input_file_name):
    image = mahotas.imread(input_file_name)
    # Check whether the image is on a 0-1 scale; if so, rescale to 0-255
    if 0 < image[0][0][0] < 1:
        image=image*255
        image = image.astype('uint8')
    return image
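A hedged usage sketch (the file name is made up); note the wrapper inspects only a single pixel to decide whether rescaling is needed:

img = imread('photo.png')
# img is on a 0-255 scale whether the file stored 0-1 floats or not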
Example #31
def test_as_grey():
    filename = path.join(path.dirname(__file__), '..', 'demos', 'data',
                         'luispedro.jpg')
    im = mh.imread(filename, as_grey=1)
    assert im.ndim == 2
Example #32
from pylab import imshow
import numpy as np
import mahotas
wally = mahotas.imread('DepartmentStore.jpg')

wfloat = wally.astype(float)
r,g,b = wfloat.transpose((2,0,1))

w = wfloat.mean(2)

# Wally's shirt has red-and-white horizontal stripes, so build a template of
# alternating two-row bands: rows 0,1,4,5,... are -1, the others stay +1
pattern = np.ones((24, 16), float)
for i in xrange(2):
    pattern[i::4] = -1

# convolve the "redness" (red channel minus mean intensity) with the template
v = mahotas.convolve(r-w, pattern)

# keep the best match and dilate it so it is visible when displayed
mask = (v == v.max())
mask = mahotas.dilate(mask, np.ones((48, 24)))

# wally -= .8*wally * ~mask[:,:,None]
imshow(wally)
Example #33
import numpy as np
import mahotas as mh
from . import regions
from . import pixel

histone = mh.imread('data/image_00_00_protein.png')
dna = mh.imread('data/image_00_00_dna.png')
rois = mh.imread('data/image_00_00_rois.png')

def test_rois_None():
    def test_function(f):
        features, labels = f(histone, dna, rois)
        features2 = f(histone, dna, None)
        assert np.all(features == features2)
        assert len(features) == len(labels)
        assert np.abs(np.mean(rois > 0) - np.dot(features.T[0],labels)/np.sum(features.T[0])) < .05
    yield test_function, regions.hypersegmented_features
    yield test_function, pixel.pixel_features
    yield test_function, (lambda h,d,r : pixel.surf_grid(h,d,r,1.0))

Example #34
# Set HoG parameters, number of bins, maximum angle, and number of pyramid levels
bins = 9
angle = 180
L = 3
num_spatial_bins = 3
phog_times = []
vhog_times = []
triggs_times = []
gpu_keypoint_times = []
gpu_patch_times = []
num_plots = 5

# Get timing info for each of the patch-based CPU versions
print "Timing the CPU versions ..."
for indx in range(len(img_sizes)):
    cur_image = imread(img_prefix + img_names[indx])
    if len(cur_image.shape) == 3:
        cur_image = clr.rgb2gray(cur_image)
    height, width = cur_image.shape

    st_time = time.time()
    p = phog.phog(img_prefix + img_names[indx], bins, angle, L,
                  [0, height, 0, width])
    ed_time = time.time() - st_time
    phog_times.append(ed_time)

    st_time = time.time()
    v = vhog.hog(cur_image, bins, num_spatial_bins, angle,
                 [0, height, 0, width])
    ed_time = time.time() - st_time
    vhog_times.append(ed_time)
Example #35
def lowestCountourSnowDepth(imglist, datetimelist, mask, settings, logger,
                            objsize, light_threshold, sigma, bias):
    try:
        sigma = int(sigma)
        objsize = float(objsize)
        bias = float(bias)
        light_threshold = float(light_threshold)
    except:
        logger.set('Parameter error. Aborting.')
        return False
    if len(imglist) == 0:
        return False
    mask, pgs, th = mask
    if (isinstance(pgs[0], list)
            and len(pgs) != 1) or (not isinstance(pgs[0], list)
                                   and sum(pgs) == 0.0):
        logger.set(
            'One and only one polygon should be defined for this analysis. Aborting.'
        )
        return False
    pgsx = []
    pgsy = []
    for i, c in enumerate(pgs):
        if i % 2 == 0:
            pgsx.append(c)
        else:
            pgsy.append(c)
    pbox = [min(pgsy), max(pgsy), min(pgsx), max(pgsx)]
    sd = []
    time = []
    for i, imgf in enumerate(imglist):
        try:
            img = mahotas.imread(imgf, as_grey=True)
            mbox = [
                pbox[0] * img.shape[0], pbox[1] * img.shape[0],
                pbox[2] * img.shape[1], pbox[3] * img.shape[1]
            ]
            mbox = map(int, map(np.rint, mbox))
            # mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'1.jpg'),(img[mbox[0]:mbox[1],mbox[2]:mbox[3]]).astype(np.uint8))
            if sigma != 0:
                img = mahotas.gaussian_filter(img, sigma)
            # mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'2.jpg'),(img[mbox[0]:mbox[1],mbox[2]:mbox[3]]).astype(np.uint8))
            img = (img <= light_threshold)
            # mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'3.jpg'),(img[mbox[0]:mbox[1],mbox[2]:mbox[3]]*255).astype(np.uint8))
            img = img[mbox[0]:mbox[1], mbox[2]:mbox[3]]
            bottom = mbox[1] - mbox[0]
            # mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'4.jpg'),img.astype(np.uint8)*255)
            labeled, n = mahotas.label(img)
            bboxes = mahotas.labeled.bbox(labeled)
            bbheig = []
            if n == 0:
                height = np.nan
            else:
                for j, bbox in enumerate(bboxes[1:]):
                    height = objsize - objsize * bbox[1] / float(bottom)
                    height += bias
                    height = np.round(height * 100) / 100.0
                    bbheig.append(height)
                if bbheig == []:
                    height = np.nan
                else:
                    height = min(bbheig)
            time = np.append(time, (str(datetimelist[i])))
            sd = np.append(sd, height)
            logger.set('Image: |progress:4|queue:' + str(i + 1) + '|total:' +
                       str(len(imglist)))
        except Exception as e:
            print(e)
            logger.set("Processing " + imgf + " failed.")

    output = [["Snow Depth", ["Time", time, "Snow Depth", sd]]]
    return output
Example #36
def salvatoriSnowCover(img_imglist, datetimelist, mask, settings, logger, red,
                       green, blue, middata, rectsw, extent, extent_proj, res,
                       dem, C, C_proj, Cz, hd, td, vd, f, w, interpolate, flat,
                       origin, ax, ay):
    rectsw = bool(float(rectsw))
    middata = bool(float(middata))
    dummyImg = False
    for img in img_imglist:
        try:
            mahotas.imread(img)
            dummyImg = img
            break
        except:
            pass
    if not dummyImg:
        logger.set("All images invalid.")
        return False
    if rectsw:
        logger.set("Obtaining weight mask...")
        params = map(np.copy, [
            extent, extent_proj, res, dem, C, C_proj, Cz, hd, td, vd, f, w,
            interpolate, flat, origin, ax, ay
        ])
        auxfilename = False
        from definitions import AuxDir, TmpDir
        readydata = False
        for hdf in os.listdir(AuxDir):
            if "SNOWCOV001" in hdf:
                try:
                    auxF = h5py.File(os.path.join(AuxDir, hdf), 'r')
                    readyfile = True
                    for i in range(len(params)):
                        attr = auxF['metadata'].attrs["param" + str(i)]
                        if np.prod(np.array([attr]).shape) == 1:
                            if (attr != params[i]):
                                readyfile = False
                        else:
                            if (attr != params[i]).any():
                                readyfile = False
                    if readyfile:
                        logger.set(
                            "Calculation has done before with same parameters, auxillary info is being read from file..."
                        )
                        tiles = np.copy(auxF['metadata'][...]).tolist()
                        for d in auxF:
                            if str(d) == 'metadata':
                                continue
                            varname = str(d).split()[0]
                            tilename = str(d).split()[1]
                            if len(tiles) == 1:
                                exec(varname + "= np.copy(auxF[d])")
                            else:
                                if varname not in locals():
                                    exec(varname + '=None')
                                exec(varname + "=writeData(np.copy(auxF[d])," +
                                     varname +
                                     ",map(int,tilename.split('-')))[0]")
                        auxF.close()
                        logger.set("\tRead.")
                        readydata = True
                        auxfilename = hdf
                        break
                    auxF.close()
                except:
                    try:
                        auxF.close()
                    except:
                        continue
        if not readydata:
            Wp = Georectify1([dummyImg], [datetimelist[0]], mask, settings,
                             logger, extent, extent_proj, res, dem, C, C_proj,
                             Cz, hd, td, vd, f, w, interpolate, flat, origin,
                             ax, ay)[0][1][5]
            logger.set('Writing results for next run...')
            auxfilename = 'SNOWCOV001_' + str(uuid4()) + '.h5'
            auxF = h5py.File(os.path.join(AuxDir, auxfilename), 'w')
            tiles = [[0, 0, Wp.shape[0], Wp.shape[1]]]
            auxF.create_dataset('metadata', data=np.array(tiles))
            for i, p in enumerate(params):
                auxF['metadata'].attrs.create("param" + str(i), p)
            for i, tile in enumerate(tiles):
                Wp_ = readData(Wp, tile)[0]
                auxF.create_dataset('Wp ' + str(tile).replace(
                    ', ', '-').replace('[', '').replace(']', ''),
                                    Wp_.shape,
                                    data=Wp_)
                Wp_ = None
            auxF.close()
        Wp = Wp[::-1]
    else:
        Wp = np.ones(mahotas.imread(dummyImg).shape[:2])
    mask, pgs, th = mask
    mask = LensCorrRadial(mask, '0', logger, origin, ax, ay, 0)[0][1][1]
    Wp *= (mask.transpose(2, 0, 1)[0] == 1)
    if np.mean(mask) == 1:
        logger.set("Weightmask quality: " + str(
            np.sum(Wp[-100:, Wp.shape[1] / 2 - 50:Wp.shape[1] / 2 + 50] != 0) /
            10000))
    else:
        logger.set("Weightmask quality: " +
                   str(1 - np.sum((Wp == 0) *
                                  (mask.transpose(2, 0, 1)[0] == 1)) /
                       float(np.sum((mask.transpose(2, 0, 1)[0] == 1)))))
    logger.set("Calculating snow cover fractions...")
    scr = []
    ssr = []
    snr = []
    mar = []

    scn = []
    ssn = []
    snn = []
    man = []

    time = []
    thr = []
    thg = []
    thb = []

    Wp_full = deepcopy(Wp)
    for i_img, imgf in enumerate(img_imglist):
        try:
            snow = 0
            nosnow = 0
            img = mahotas.imread(imgf)
            if mask.shape != img.shape:
                mask = maskers.polymask(img, pgs, logger)
                Wp = mahotas.imresize(Wp_full, img.shape[:2])
            (img,
             thv) = salvatoriSnowDetect(img, mask * maskers.thmask(img, th),
                                        settings, logger, red, green, blue)
            # mimg = np.dstack((img==1,img==0,img==2)).astype(np.uint8)*255
            if -1 in thv:
                continue
            time = np.append(time, (str(datetimelist[i_img])))
            img = LensCorrRadial(img, str(datetimelist[i_img]), logger, origin,
                                 ax, ay, 0)[0][1][1]
            snow = np.sum(((img == 1) * Wp).astype(int))
            nosnow = np.sum(((img == 0) * Wp).astype(int))
            masked = np.sum(((img == 2) * Wp).astype(int))
            scr = np.append(scr, snow / float(snow + nosnow))
            if middata:
                ssr = np.append(ssr, snow)
                snr = np.append(snr, nosnow)
                mar = np.append(mar, masked)
                snow = np.sum(((img == 1)).astype(int))
                nosnow = np.sum(((img == 0)).astype(int))
                masked = np.sum(((img == 2)).astype(int))
                scn = np.append(scn, snow / float(snow + nosnow))
                ssn = np.append(ssn, snow)
                snn = np.append(snn, nosnow)
                man = np.append(man, masked)
                thr = np.append(thr, thv[0])
                thg = np.append(thg, thv[1])
                thb = np.append(thb, thv[2])
        except Exception as e:
            print(e)
            logger.set("Processing " + imgf + " failed.")
        logger.set('Image: |progress:4|queue:' + str(i_img + 1) + '|total:' +
                   str(len(img_imglist)))
    scr = np.round(scr * 100).astype(np.int32)
    scn = np.round(scn * 100).astype(np.int32)
    if middata:
        return [[
            "Snow Cover Fraction",
            [
                "Time", time, "Snow Cover Fraction", scr,
                "Snow Cover Fraction - Non-Rectified", scn, "Threshold - Red",
                thr, "Threshold - Green", thg, "Threshold - Blue", thb,
                "Snow - Rectified", ssr, "Nosnow - Rectified", snr,
                "Masked - Rectified", mar, "Snow - Non-Rectified", ssn,
                "Nosnow - Non-Rectified", snn, "Masked - Non-Rectified", man
            ]
        ]]
    else:
        return [[
            "Snow Cover Fraction", ["Time", time, "Snow Cover Fraction", scr]
        ]]
Example #37
from os import path, walk

import numpy as np
import mahotas as mh
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale

X = []
y = []

for dir_path, dir_names, file_names in walk(
        'data/att-faces/orl_faces'
):  # This path is incorrect, so the code doesn't execute
    for fn in file_names:
        if fn[-3:] == 'pgm':
            image_filename = path.join(dir_path, fn)
            X.append(
                scale(
                    mh.imread(image_filename,
                              as_grey=True).reshape(10304).astype('float32')))
            y.append(dir_path)
X = np.array(X)
X_train, X_test, y_train, y_test = train_test_split(X, y)

pca = PCA(n_components=150)
X_train_reduced = pca.fit_transform(X_train)
X_test_reduced = pca.transform(X_test)

print 'The original dimensions of the training data were ', X_train.shape
print 'The reduced dimensions of the training data are ', X_train_reduced.shape

classifier = LogisticRegression()
accuracies = cross_val_score(classifier, X_train_reduced, y_train)

print("Cross validation accuracy: ", np.mean(accuracies))
Example #38
import mahotas
import numpy as np
from pylab import imshow, gray, show, subplot
from os import path

luispedro_image = path.join(path.dirname(path.abspath(__file__)), 'data',
                            'luispedro.jpg')

photo = mahotas.imread(luispedro_image, as_grey=True)
photo = photo.astype(np.uint8)

gray()
subplot(131)
imshow(photo)

T_otsu = mahotas.otsu(photo)
print T_otsu
subplot(132)
imshow(photo > T_otsu)

T_rc = mahotas.rc(photo)
print T_rc
subplot(133)
imshow(photo > T_rc)
show()
Example #39
import numpy as np
import pylab
import mahotas as mh

dna = mh.imread('images/dna.jpeg')
pen = mh.imread('images/Penguins.jpg')

#pylab.imshow(pen)
#pylab.show()

# dnaf = mh.gaussian_filter(dna, 8)
#change type to int for otsu to work
# dnaf = dnaf.astype('uint8')
# T = mh.thresholding.otsu(dnaf)
# pylab.imshow(dnaf > T)
# pylab.show()

dnaf = mh.gaussian_filter(dna, 8)
rmax = mh.regmax(dnaf)

pylab.imshow(mh.overlay(dna, rmax))
pylab.show()

pylab.imshow(dnaf)
pylab.show()
# labeled, nr_objects = mh.label(dnaf > T)
# print "nr_objects: {}".format(nr_objects)
# pylab.imshow(labeled)
# pylab.jet()
# pylab.show()
Example #40
            segmentation_files = sorted(glob.glob(imagedir + '/*.png'))
            print 'Found {0} segmentations in directory {1}.'.format(
                len(segmentation_files), di)

            if pi == 0 and di == 0:
                out_hdf5 = h5py.File(output_path, 'w')
                segmentations = out_hdf5.create_dataset(
                    'segmentations', (imshape[0], imshape[1],
                                      len(segmentation_files), len(seg_dirs)),
                    dtype=np.bool,
                    chunks=(256, 256, 1, 1),
                    compression='gzip')

            for fi in range(len(segmentation_files)):

                seg = mahotas.imread(segmentation_files[fi]) == 0

                segmentations[input_areas[pi][0]:input_areas[pi][1],
                              input_areas[pi][2]:input_areas[pi][3], fi,
                              di] = seg

    figure(figsize=(20, 20))
    imshow(segmentations[:, :, 10, 10], cmap=cm.gray)

    out_hdf5.close()
    print "Success"

except Exception as e:
    print e
    raise
Example #41
"""No linter no cry."""

import numpy
import pylab
import mahotas as mh

image1 = mh.imread('i1.jpg')
image2 = mh.imread('i2.jpg')
bcg1 = mh.imread('b1.jpg')
bcg2 = mh.imread('b2.jpg')

# TODO: the Gaussian filter turns the pictures grey. Why?
# p = 4
# image1 = mh.gaussian_filter(image1, p)
# image2 = mh.gaussian_filter(image2, p)
# bcg1 = mh.gaussian_filter(bcg1, p)
# bcg2 = mh.gaussian_filter(bcg2, p)

assert len(image1) == len(image2) == len(bcg1) == len(bcg2)
assert len(image1[0]) == len(image2[0]) == len(bcg1[0]) == len(bcg2[0])
numLines = len(image1)
numPixel = len(image1[0])

result = []
for line in range(0, numLines):
    result.append([])
    for pix in range(0, numPixel):
        i1 = image1[line][pix]
        i2 = image2[line][pix]
        b1 = bcg1[line][pix]
        b2 = bcg2[line][pix]
Example #42
def edginess_sobel_from_fname(fname):
    from edginess import edginess_sobel
    return edginess_sobel(mh.imread(fname, as_grey=True))
def features_for(im):
    im = mh.imread(im, as_grey=True).astype(np.uint8)
    return mh.features.haralick(im).mean(0)
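A hypothetical call: mahotas' haralick() returns a 4x13 matrix (one row per direction) for a 2D image, and mean(0) collapses it into a 13-dimensional texture descriptor:

vec = features_for('scene00.jpg')   # file name assumed
print(vec.shape)                    # (13,)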
Example #44
    output = T.addbroadcast(output, 0)
    output = output.squeeze()
    output = output.flatten(2)
    output = T.nnet.softmax(output.T).T

    return output.reshape((2, output_shape[2], output_shape[3]))


if __name__ == '__main__':
    rng = numpy.random.RandomState(929292)

    import mahotas
    import matplotlib.pyplot as plt
    #CPU
    #image = mahotas.imread('ac3_input_0141.tif')
    image = mahotas.imread('test-input-1.tif')
    imageSize = 1024
    image = image[0:imageSize, 0:imageSize]

    start_time = time.clock()
    image = normalizeImage(image) - 0.5

    #GPU
    image_shared = theano.shared(
        np.float64(image))  #theano.shared(np.float64(image))
    image_shared = image_shared.reshape((1, 1, imageSize, imageSize))

    x = T.matrix('x')

    classifier = CNN(input=x,
                     batch_size=imageSize,
Example #45
haralicks = []
labels = []
chists = []

print(
    'This script will test (with cross-validation) classification of the simple 3 class dataset'
)
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))

# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
for fname in sorted(images):
    imc = mh.imread(fname)
    haralicks.append(texture(mh.colors.rgb2grey(imc)))
    chists.append(color_histogram(imc))

    # Files are named like building00.jpg, scene23.jpg...
    labels.append(fname[:-len('xx.jpg')])

print('Finished computing features.')

haralicks = np.array(haralicks)
labels = np.array(labels)
chists = np.array(chists)

haralick_plus_chists = np.hstack([chists, haralicks])

# We use Logistic Regression because it achieves high accuracy on small(ish) datasets
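A minimal sketch of that evaluation, mirroring the cross_val_score pattern used elsewhere in this collection (cv=5 is an assumption):

from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
scores = cross_val_score(LogisticRegression(), haralick_plus_chists, labels, cv=5)
print('Accuracy: {:.1%}'.format(scores.mean()))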
import scipy.ndimage
import scipy.misc
import numpy as np
import gzip
import cPickle
import glob
import os
import h5py
import partition_comparison

#param_path = 'D:/dev/Rhoana/membrane_cnn/results/good3/'
#param_path = 'D:/dev/Rhoana/membrane_cnn/results/stumpin/'
param_path = 'D:/dev/Rhoana/membrane_cnn/results/stump_combo/'
param_files = glob.glob(param_path + "*.h5")

target_boundaries = mahotas.imread(param_path + 'boundaries.png') > 0

offset_max = 32

target_boundaries = target_boundaries[offset_max:-offset_max,
                                      offset_max:-offset_max]
target_segs = np.uint32(mahotas.label(target_boundaries)[0])

param_files = [x for x in param_files if x.find('.ot.h5') == -1]

blur_radius = 3
y, x = np.ogrid[-blur_radius:blur_radius + 1, -blur_radius:blur_radius + 1]
disc = x * x + y * y <= blur_radius * blur_radius

for remove_i in range(len(param_files) + 1):
basedir = './SimpleImageDataset/'

haralicks = []
sobels = []
labels = []

print('This script will test (with cross-validation) classification of the simple 3 class dataset')
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))

# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
for fname in sorted(images):
    im = mh.imread(fname, as_grey=True)
    haralicks.append(texture(im))
    sobels.append(edginess_sobel(im))

    # Files are named like building00.jpg, scene23.jpg...
    labels.append(fname[:-len('xx.jpg')])

print('Finished computing features.')

haralicks = np.array(haralicks)
sobels = np.array(sobels)
labels = np.array(labels)

# We use logistic regression because it is very fast.
# Feel free to experiment with other classifiers
scores = cross_validation.cross_val_score(LogisticRegression(), haralicks, labels, cv=5)
def prediction_full_patch_spine(patchSize=572,
                                patchSize_out=388,
                                patchZ=23,
                                patchZ_out=1,
                                writeImage=True,
                                returnValue=True):

    start_time = time.time()
    pathPrefix = 'DIR_PATH/'
    img_search_string_grayImages = pathPrefix + 'images/validate/*.png'
    img_search_string_membraneImages = pathPrefix + 'spinemasks/validate/*.png'
    img_files_gray = sorted(glob.glob(img_search_string_grayImages))
    img_files_membrane = sorted(glob.glob(img_search_string_membraneImages))

    #load model
    print 'Read the model for evaluation'
    model = model_from_json(open('3d_unet_spine.json').read())
    model.load_weights('3d_unet_spine_weights.h5')

    cropSize = (patchSize - patchSize_out) / 2
    csZ = (patchZ - patchZ_out) / 2

    img = mahotas.imread(
        img_files_gray[0]
    )  # read the first image to get information about the shape
    grayImages = np.zeros(
        (np.shape(img_files_gray)[0], img.shape[0], img.shape[1]))
    labelImages = np.zeros(
        (np.shape(img_files_gray)[0], img.shape[0], img.shape[1]),
        dtype=np.int8)
    probImages = np.zeros(
        (np.shape(img_files_gray)[0] - 2 * csZ, img.shape[0] - 2 * cropSize,
         img.shape[1] - 2 * cropSize))

    print 'Total number of full size test images:', np.shape(img_files_gray)[0]
    read_order = range(np.shape(img_files_gray)[0])
    for img_index in read_order:
        img = mahotas.imread(img_files_gray[img_index])
        img = normalizeImage(img)
        img = img - 0.5
        grayImages[img_index, :, :] = img
        img_label = mahotas.imread(img_files_membrane[img_index]) / 255
        labelImages[img_index, :, :] = np.int8(img_label)

    numSample_axis = int((img.shape[0] - 2 * cropSize) / patchSize_out) + 1
    numSample_patch = numSample_axis**2
    numZ = float(len(img_files_gray) - 2 * csZ) / float(patchZ_out)
    numZ = int(math.ceil(numZ))
    nsamples = numSample_patch * numZ
    print 'Number of inputs for this block:', nsamples

    grayImg_set = np.zeros((nsamples, patchZ, patchSize, patchSize))
    membrane_set = np.zeros(
        (nsamples, patchZ_out, patchSize_out, patchSize_out))

    print 'Total number of probability maps:', len(img_files_gray) - 2 * csZ
    numProb = len(img_files_gray) - 2 * csZ

    num_total = 0
    for zIndex in range(numZ):
        if zIndex == numZ - 1:
            zStart = numProb - patchZ_out
        else:
            zStart = patchZ_out * zIndex

        for xIndex in range(numSample_axis - 1):
            xStart = patchSize_out * xIndex
            for yIndex in range(numSample_axis - 1):
                yStart = patchSize_out * yIndex
                grayImg_set[num_total] = grayImages[zStart:zStart + patchZ,
                                                    xStart:xStart + patchSize,
                                                    yStart:yStart + patchSize]
                num_total += 1

        xStart = img.shape[0] - patchSize
        for yIndex in range(numSample_axis - 1):
            yStart = patchSize_out * yIndex
            grayImg_set[num_total] = grayImages[zStart:zStart + patchZ,
                                                xStart:xStart + patchSize,
                                                yStart:yStart + patchSize]
            num_total += 1

        yStart = img.shape[1] - patchSize
        for xIndex in range(numSample_axis - 1):
            xStart = patchSize_out * xIndex
            grayImg_set[num_total] = grayImages[zStart:zStart + patchZ,
                                                xStart:xStart + patchSize,
                                                yStart:yStart + patchSize]
            num_total += 1

        xStart = img.shape[0] - patchSize
        yStart = img.shape[1] - patchSize
        grayImg_set[num_total] = grayImages[zStart:zStart + patchZ,
                                            xStart:xStart + patchSize,
                                            yStart:yStart + patchSize]
        num_total += 1

    for val_ind in range(num_total):
        data_x = grayImg_set[val_ind].astype(np.float32)
        data_x = np.reshape(data_x, [-1, 1, patchZ, patchSize, patchSize])
        im_pred = model.predict(x=data_x, batch_size=1)
        membrane_set[val_ind] = np.reshape(
            im_pred, (patchZ_out, patchSize_out, patchSize_out))

    num_total = 0
    for zIndex in range(numZ):
        if zIndex == numZ - 1:
            zStart = numProb - patchZ_out
        else:
            zStart = patchZ_out * zIndex

        for xIndex in range(numSample_axis - 1):
            xStart = patchSize_out * xIndex
            for yIndex in range(numSample_axis - 1):
                yStart = patchSize_out * yIndex
                probImages[zStart:zStart + patchZ_out,
                           xStart:xStart + patchSize_out, yStart:yStart +
                           patchSize_out] = membrane_set[num_total]
                num_total += 1

        xStart = (numSample_axis - 1) * patchSize_out
        for yIndex in range(numSample_axis - 1):
            yStart = patchSize_out * yIndex
            probImages[zStart:zStart + patchZ_out, xStart:, yStart:yStart +
                       patchSize_out] = membrane_set[num_total, :,
                                                     xStart - img.shape[0] +
                                                     2 * cropSize:, :]
            num_total += 1

        yStart = (numSample_axis - 1) * patchSize_out
        for xIndex in range(numSample_axis - 1):
            xStart = patchSize_out * xIndex
            probImages[zStart:zStart + patchZ_out,
                       xStart:xStart + patchSize_out,
                       yStart:] = membrane_set[num_total, :, :, yStart -
                                               img.shape[0] + 2 * cropSize:]
            num_total += 1

        xStart = (numSample_axis - 1) * patchSize_out
        yStart = (numSample_axis - 1) * patchSize_out
        probImages[zStart:zStart + patchZ_out, xStart:,
                   yStart:] = membrane_set[num_total, :, xStart -
                                           img.shape[0] + 2 * cropSize:,
                                           yStart - img.shape[0] +
                                           2 * cropSize:]
        num_total += 1

    if writeImage:
        print 'Store images'
        for imgIndex in range(numProb):
            scipy.misc.imsave(
                pathPrefix + "result/prediction_" + str("%04d" % imgIndex) +
                ".tif", probImages[imgIndex])

    end_time = time.time()
    total_time = (end_time - start_time)

    print 'Running time: ', total_time / 60.
    print 'finished the prediction'

    if returnValue:
        newMembrane = np.zeros((numProb, (img.shape[0] - 2 * cropSize)**2))
        newProb_set = np.zeros((numProb, (img.shape[0] - 2 * cropSize)**2))
        for i in range(numProb):
            newMembrane[i] = crop_image_layer(labelImages[i + csZ, :, :],
                                              cropSize).flatten()
            newProb_set[i] = probImages[i].flatten()

        newMembrane = newMembrane.astype(np.int)
        return newProb_set, newMembrane
Example #49
def salvatoriSnowOnCanopy(img_imglist, datetimelist, mask, settings, logger,
                          threshold, middata):
    middata = bool(float(middata))
    threshold = float(threshold)
    mask, pgs, th = mask
    logger.set("Calculating snow on canopy...")
    scr = []
    ssr = []
    snr = []
    mar = []

    time = []
    thb = []

    for i_img, imgf in enumerate(img_imglist):
        try:
            snow = 0
            nosnow = 0
            img = mahotas.imread(imgf)
            if mask.shape != img.shape:
                mask = maskers.polymask(img, pgs, logger)
            (img, thv) = salvatoriSnowDetect2(img,
                                              mask * maskers.thmask(img, th),
                                              settings, logger)
            mimg = np.dstack(
                (img == 1, img == 0, img == 2)).astype(np.uint8) * 255
            if thv == -1:
                continue
            time = np.append(time, (str(datetimelist[i_img])))
            snow = np.sum(((img == 1)).astype(int))
            nosnow = np.sum(((img == 0)).astype(int))
            masked = np.sum(((img == 2)).astype(int))
            scr = np.append(scr, snow / float(snow + nosnow))
            if middata:
                ssr = np.append(ssr, snow)
                snr = np.append(snr, nosnow)
                mar = np.append(mar, masked)
                thb = np.append(thb, thv)
        except Exception as e:
            print(e)
            logger.set("Processing " + imgf + " failed.")
        logger.set('Image: |progress:4|queue:' + str(i_img + 1) + '|total:' +
                   str(len(img_imglist)))
    scr = np.round(scr * 100).astype(np.int32)
    if middata:
        return [[
            "Snow Cover Fraction",
            [
                "Time", time, "Snow on canopy",
                (scr > threshold).astype(np.int32), "Snow Cover Fraction", scr,
                "Threshold", thb, "Snow", ssr, "Nosnow", snr, "Masked", mar
            ]
        ]]
    else:
        return [[
            "Snow Cover Fraction",
            [
                "Time", time, "Snow on canopy",
                (scr > threshold).astype(np.int32)
            ]
        ]]
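# Aside: np.append copies the whole array on every call, so the per-image
# accumulation above is quadratic in the number of images. A minimal sketch
# of the linear list-based equivalent (stand-in values, not from the
# original):
import numpy as np
scr = []
for snow, nosnow in [(120, 880), (300, 700)]:   # per-image pixel counts
    scr.append(snow / float(snow + nosnow))
scr = np.round(np.array(scr) * 100).astype(np.int32)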
Example #50
import numpy as np
import mahotas as mh
image = mh.imread('scene00.jpg')
from matplotlib import pyplot as plt
plt.imshow(image)
plt.show()
image = mh.colors.rgb2grey(image, dtype=np.uint8)
plt.imshow(image)  # Display the image
plt.gray()
thresh = mh.thresholding.otsu(image)
print('Otsu threshold is {}.'.format(thresh))
# Otsu threshold is 138.
plt.imshow(image > thresh)

im16 = mh.gaussian_filter(image, 16)
im = mh.demos.load('lenna')

r, g, b = im.transpose(2, 0, 1)
r12 = mh.gaussian_filter(r, 12.)
g12 = mh.gaussian_filter(g, 12.)
b12 = mh.gaussian_filter(b, 12.)
im12 = mh.as_rgb(r12, g12, b12)
h, w = r.shape  # height and width
Y, X = np.mgrid[:h, :w]
Y = Y - h / 2.  # center at h/2
Y = Y / Y.max()  # normalize to -1 .. +1

X = X - w / 2.
X = X / X.max()

C = np.exp(-2. * (X**2 + Y**2))
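# The excerpt stops after building the Gaussian weight C; a sketch of the
# intended use, blending the sharp image in the centre with the blurred copy
# at the edges (the normalization and output file name are assumptions):
C = C - C.min()
C = C / C.max()          # rescale weights to 0 .. 1
C = C[:, :, None]        # broadcast over the three color channels
ring = mh.stretch(im * C + (1 - C) * im12)
mh.imsave('lenna-ring.jpg', ring)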
import numpy as np
import scipy
import pylab
import pymorph
import mahotas
from scipy import ndimage

dna = mahotas.imread('dna.jpeg')
#pylab.imshow(dna)
#pylab.show()

print(dna.shape)
print(dna.dtype)
print(dna.max())
print(dna.min())

#pylab.imshow(dna // 2)
#pylab.show()

dnaf = ndimage.gaussian_filter(dna,16)
T = mahotas.thresholding.otsu(dnaf)
#pylab.imshow(dnaf>T)
#pylab.gray()
#pylab.show()

# label cells
labeled,nr_objects = ndimage.label(dnaf>T)
print(nr_objects)
#pylab.imshow(labeled)
#pylab.jet()
#pylab.show()
Example #52
import os

import numpy as np
import mahotas as mh
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
# sklearn.cross_validation / sklearn.grid_search were merged into
# sklearn.model_selection in scikit-learn 0.20
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report

if __name__ == '__main__':
    X = []
    y = []
    for path, subdirs, files in os.walk('data/English/Img/GoodImg/Bmp/'):
        for filename in files:
            f = os.path.join(path, filename)
            target = filename[3:filename.index('-')]
            img = mh.imread(f, as_grey=True)
            if img.shape[0] <= 30 or img.shape[1] <= 30:
                continue
            img_resized = mh.imresize(img, (30, 30))
            if img_resized.shape != (30, 30):
                img_resized = mh.imresize(img_resized, (30, 30))
            X.append(img_resized.reshape((900, 1)))
            y.append(target)
    X = np.array(X)
    X = X.reshape(X.shape[:2])
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.1)
    pipeline = Pipeline([('clf', SVC(kernel='rbf', gamma=0.01, C=100))])
    parameters = {
        'clf__gamma': (0.01, 0.03, 0.1, 0.3, 1),
        'clf__C': (0.1, 0.3, 1, 3, 10, 30),
    }
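    # The excerpt stops before the search runs; a sketch of the likely next
    # steps (n_jobs and scoring are assumptions, not from the original):
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1,
                               scoring='accuracy')
    grid_search.fit(X_train, y_train)
    print(classification_report(y_test, grid_search.predict(X_test)))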
from os import walk
import os

from sklearn.preprocessing import scale
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression

print('Loading the images...')
# TODO change path
X = []
y = []
for (dirpath, dirnames, filenames) in walk(
        '/home/gavin/PycharmProjects/mastering-machine-learning/ch-reduction/data/faces94/male'
):
    for fn in filenames:
        if fn[-3:] == 'jpg':
            image_filename = os.path.join(dirpath, fn)
            X.append(
                scale(mh.imread(image_filename, as_grey=True).reshape(36000)))
            y.append(fn[:fn.index('.')])

X = np.array(X)
X_train, X_test, y_train, y_test = train_test_split(X, y)

n_components = 150
print('Reducing the dimensions...')
pca = PCA(n_components=n_components)
X_train_reduced = pca.fit_transform(X_train)
X_test_reduced = pca.transform(X_test)
print(X_train_reduced.shape)
print(X_test_reduced.shape)

print('Training the classifier...')
classifier = LogisticRegression()
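# The excerpt ends before the classifier is used; a plausible continuation
# (reconstructed, not from the original):
classifier.fit(X_train_reduced, y_train)
predictions = classifier.predict(X_test_reduced)
print(classification_report(y_test, predictions))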
# Orphaned tail of a K-means silhouette evaluation; the head of the statement
# was lost in the excerpt. It computed
# metrics.silhouette_score(X, kmeans_model.labels_, metric='euclidean').
plt.show()

## Image Compression

# image quantization
#   a lossy compression method that replaces a range of
#   similar colors (within-cluster members) in an image
#   with a single color; it reduces the size of the image file.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
import mahotas as mh
# import utilities
original_img = np.array(mh.imread('image_quantization.png'),
                        dtype=np.float64) / 255
original_dimensions = tuple(original_img.shape)
width, height, depth = tuple(original_img.shape)
image_flattened = np.reshape(original_img, (width * height, depth))
# flatten image
image_array_sample = shuffle(image_flattened, random_state=0)[:1000]
estimator = KMeans(n_clusters=64, random_state=0)
estimator.fit(image_array_sample)
# create 64 clusters from a sample of 1000 randomly
# selected colors.
cluster_assignments = estimator.predict(image_flattened)
compressed_palette = estimator.cluster_centers_
compressed_img = np.zeros((width, height, compressed_palette.shape[1]))
label_idx = 0
for i in range(width):
    for j in range(height):
        # paint each pixel with its cluster's centre color
        # (loop body reconstructed; the excerpt was truncated here)
        compressed_img[i][j] = compressed_palette[cluster_assignments[label_idx]]
        label_idx += 1
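# A sketch of how the original and quantized images might be compared
# (display code assumed, not from the excerpt):
plt.figure()
plt.imshow(original_img)
plt.figure()
plt.imshow(compressed_img)
plt.show()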
Example #55
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License

import numpy as np
import mahotas as mh
image = mh.imread('../1400OS_10_01.jpeg')
image = mh.colors.rgb2gray(image, dtype=np.uint8)
thresh = mh.thresholding.otsu(image)
print(thresh)
otsubin = (image > thresh)
mh.imsave('otsu-threshold.jpeg', otsubin.astype(np.uint8) * 255)
otsubin = ~mh.close(~otsubin, np.ones((15, 15)))
mh.imsave('otsu-closed.jpeg', otsubin.astype(np.uint8) * 255)

thresh = mh.thresholding.rc(image)
print(thresh)
mh.imsave('rc-threshold.jpeg', (image > thresh).astype(np.uint8) * 255)
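# Aside: the two thresholds can be printed side by side for a quick
# comparison (reuses `image` from above):
print('Otsu: {}, Riddler-Calvard: {}'.format(
    mh.thresholding.otsu(image), mh.thresholding.rc(image)))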
Example #56
import mahotas
import numpy as np
from pylab import imshow, show

f = mahotas.imread('mahotas/demos/data/nuclear.png')
f = f[:,:,0]
imshow(f)
show()

f = mahotas.gaussian_filter(f, 4)
f = (f> f.mean())
imshow(f)
show()

labeled, n_nucleus  = mahotas.label(f)
print('Found {} nuclei.'.format(n_nucleus))
imshow(labeled)
show()
sizes = mahotas.labeled.labeled_size(labeled)
too_big = np.where(sizes > 10000)
labeled = mahotas.labeled.remove_regions(labeled, too_big)
imshow(labeled)
show()

labeled = mahotas.labeled.remove_bordering(labeled)
imshow(labeled)
show()

relabeled, n_left = mahotas.labeled.relabel(labeled)
print('After filtering and relabeling, there are {} nuclei left.'.format(n_left))
imshow(relabeled)
show()
import numpy as np
import mahotas as mh

text = mh.imread("../Dataset/simple-dataset/text21.jpg")
scene = mh.imread("../Dataset/simple-dataset/scene00.jpg")
h, w, _ = text.shape
canvas = np.zeros((h, 2 * w + 128, 3), np.uint8)
canvas[:, -w:] = scene
canvas[:, :w] = text
canvas = canvas[::4, ::4]
mh.imsave('../charts/1400OS_10_10.jpg', canvas)
Example #58
# (function header reconstructed from the call below; the default
# saturation_level is an assumption, not from the original)
def normalize_image(original_image, saturation_level=0.005, invert=False):
    sorted_image = np.sort(np.uint8(original_image).ravel())
    minval = np.float32(sorted_image[int(len(sorted_image) *
                                         (saturation_level / 2))])
    maxval = np.float32(sorted_image[int(len(sorted_image) *
                                         (1 - saturation_level / 2))])
    norm_image = np.float32(original_image - minval) * (255 /
                                                        (maxval - minval))
    norm_image[norm_image < 0] = 0
    norm_image[norm_image > 255] = 255
    if invert:
        norm_image = 255 - norm_image
    return np.uint8(norm_image)


input_image = np.float32(
    normalize_image(mahotas.imread(input_image_path),
                    invert=(not image_inverted)))

if image_downsample_factor != 1:
    input_image = mahotas.imresize(input_image, image_downsample_factor)

average_image = combo_net.apply_combo_net(input_image)


def write_image(output_path, data, image_num=0, downsample=1):
    if downsample != 1:
        data = np.float32(mahotas.imresize(data, downsample))
    maxdata = np.max(data)
    mindata = np.min(data)
    normdata = (np.float32(data) - mindata) / (maxdata - mindata)
    mahotas.imsave(output_path, np.uint16(normdata * 65535))
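# Hypothetical usage: write the combo-net output, scaling back up to the
# input's original resolution (file name and factor are assumptions):
write_image('average_prediction.tif', average_image,
            downsample=1.0 / image_downsample_factor)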
import numpy as np
import mahotas as mh
from glob import glob
from sklearn.linear_model import LogisticRegression
# cross_val_score lived in sklearn.cross_validation before scikit-learn 0.20
from sklearn.model_selection import cross_val_score

# edginess_sobel is defined elsewhere in this example's source file
basedir = 'simple-dataset/'


def features_for(im):
    # 13 Haralick texture features, averaged over the four 2-D directions
    im = mh.imread(im, as_grey=True).astype(np.uint8)
    return mh.features.haralick(im).mean(0)


features = []
sobels = []
labels = []
images = glob('{}/*.jpg'.format(basedir))
for im in images:
    features.append(features_for(im))
    sobels.append(edginess_sobel(mh.imread(im, as_grey=True)))
    labels.append(im[:-len('00.jpg')])

features = np.array(features)
labels = np.array(labels)

n = features.shape
nl = labels.shape

print('features=' + str(n))
print(str(features))
print('labels=' + str(nl))
print(str(labels))

# (the call was truncated in the excerpt; `labels` and cv=5 are the natural
# completion but are reconstructed, not original)
scores = cross_val_score(LogisticRegression(), features, labels, cv=5)
print('Accuracy: {:.1%}'.format(scores.mean()))
# 3 Class
# (class_colors must be defined for the code below; the first palette is
#  enabled here as an assumption, since the excerpt begins mid-script)
class_colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255]]
#class_colors = [[255,85,255], [255,255,0], [0,255,255]]

# class_colors = [0, 1]

nclass = len(class_colors)

training_x = np.zeros((0,0), dtype=np.float32)
training_y = np.zeros((0,1), dtype=np.int32)

print('Found {0} training images.'.format(len(files)))

# Loop through all images
for file in files:
	training_image = mahotas.imread(file)

	for classi in range(nclass):

		this_color = class_colors[classi]

		# Find pixels for this class
		# class_indices = np.nonzero(np.logical_and(
		# 	training_image[:,:,this_color] > training_image[:,:,(this_color + 1) % 3],
		# 	training_image[:,:,this_color] > training_image[:,:,(this_color + 2) % 3]))

		class_indices = np.nonzero(np.logical_and(
			training_image[:,:,0] == this_color[0],
			training_image[:,:,1] == this_color[1],
			training_image[:,:,2] == this_color[2]))