Code Example #1
 def test_tv_denoise_2d(self):
     """
     Apply the TV denoising algorithm on the lena image provided
     by scipy
     """
     # lena image
     lena = color.rgb2gray(data.lena())[:256, :256]
     # add noise to lena
     lena += 0.5 * lena.std() * np.random.randn(*lena.shape)
     # clip noise so that it does not exceed allowed range for float images.
     lena = np.clip(lena, 0, 1)
     # denoise
     denoised_lena = filter.tv_denoise(lena, weight=60.0)
     # which dtype?
     assert denoised_lena.dtype in [np.float, np.float32, np.float64]
     from scipy import ndimage
     grad = ndimage.morphological_gradient(lena, size=((3, 3)))
     grad_denoised = ndimage.morphological_gradient(
         denoised_lena, size=((3, 3)))
     # test if the total variation has decreased
     assert np.sqrt(
         (grad_denoised ** 2).sum()) < np.sqrt((grad ** 2).sum()) / 2
     denoised_lena_int = filter.tv_denoise(img_as_uint(lena),
                                           weight=60.0, keep_type=True)
     assert denoised_lena_int.dtype is np.dtype('uint16')
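The tests in this example (and in several of the later examples) use the morphological gradient over a 3x3 window as a proxy for total variation. For reference, here is a minimal, self-contained sketch on a small synthetic array (not taken from any of the projects on this page) showing that morphological_gradient is simply the grey dilation minus the grey erosion over the same window:

import numpy as np
from scipy import ndimage

# Small synthetic image with a single vertical edge.
img = np.zeros((8, 8), dtype=float)
img[:, 4:] = 1.0

grad = ndimage.morphological_gradient(img, size=(3, 3))

# Equivalent formulation: grey dilation minus grey erosion over the same window.
expected = (ndimage.grey_dilation(img, size=(3, 3))
            - ndimage.grey_erosion(img, size=(3, 3)))
assert np.allclose(grad, expected)

# The gradient is nonzero only in the two columns adjacent to the edge.
print(np.unique(np.nonzero(grad)[1]))   # -> [3 4]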
Code Example #2
def do_watershed(image, markers,  tfile, shape, bstruct, algorithm, mg_size, use_ww_wl, wl, ww, q):
    mask = np.memmap(tfile, shape=shape, dtype='uint8', mode='r+')
                
    if use_ww_wl:
        if algorithm == 'Watershed':
            tmp_image = ndimage.morphological_gradient(
                           get_LUT_value(image, ww, wl).astype('uint16'),
                           mg_size)
            tmp_mask = watershed(tmp_image, markers.astype('int16'), bstruct)
        else:
            tmp_image = get_LUT_value(image, ww, wl).astype('uint16')
            #tmp_image = ndimage.gaussian_filter(tmp_image, self.config.mg_size)
            #tmp_image = ndimage.morphological_gradient(
                           #get_LUT_value(image, ww, wl).astype('uint16'),
                           #self.config.mg_size)
            tmp_mask = watershed_ift(tmp_image, markers.astype('int16'), bstruct)
    else:
        if algorithm == 'Watershed':
            tmp_image = ndimage.morphological_gradient((image - image.min()).astype('uint16'), mg_size)
            tmp_mask = watershed(tmp_image, markers.astype('int16'), bstruct)
        else:
            tmp_image = (image - image.min()).astype('uint16')
            #tmp_image = ndimage.gaussian_filter(tmp_image, self.config.mg_size)
            #tmp_image = ndimage.morphological_gradient((image - image.min()).astype('uint16'), self.config.mg_size)
            tmp_mask = watershed_ift(tmp_image, markers.astype('int8'), bstruct)
    mask[:] = tmp_mask
    mask.flush()
    q.put(1)
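do_watershed above depends on InVesalius-specific helpers (get_LUT_value, the shared memmap, the result queue), so it cannot run on its own. The core pattern it illustrates is using the morphological gradient as the elevation map for a marker-based watershed. A stripped-down sketch of just that pattern on a synthetic image with hand-placed markers (the blob layout, marker positions and size=(3, 3) are all illustrative):

import numpy as np
from scipy import ndimage
from skimage.segmentation import watershed  # older scikit-image: skimage.morphology.watershed

# Synthetic image: two bright blobs on a dark background.
image = np.zeros((64, 64), dtype='uint16')
image[10:25, 10:25] = 1000
image[35:55, 35:55] = 1000

# Markers: 1 = background seed, 2 and 3 = one seed inside each blob.
markers = np.zeros_like(image, dtype='int16')
markers[0, 0] = 1
markers[17, 17] = 2
markers[45, 45] = 3

# The gradient is high on the blob borders and flat elsewhere, so the
# watershed floods from the seeds and stops at those ridges.
elevation = ndimage.morphological_gradient(image, size=(3, 3))
labels = watershed(elevation, markers)
print(np.unique(labels))   # -> [1 2 3]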
Code Example #3
File: util.py  Project: GaelVaroquaux/tomo-tv
def compute_sparsity(im):
    l_x = len(im)
    X, Y = np.ogrid[:l_x, :l_x]
    mask = ((X - l_x/2)**2 + (Y - l_x/2)**2 <= (l_x/2)**2)
    grad1 = ndimage.morphological_gradient(im, footprint=np.ones((3, 3)))
    grad2 = ndimage.morphological_gradient(im, footprint=ndimage.generate_binary_structure(2, 1))
    return (grad1[mask] > 0).mean(), (grad2[mask] > 0).mean() 
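compute_sparsity compares two footprints: np.ones((3, 3)), the full 8-connected window, and generate_binary_structure(2, 1), the 4-connected cross. A small sketch on an illustrative binary square showing how the choice of footprint changes which pixels the gradient marks:

import numpy as np
from scipy import ndimage

im = np.zeros((7, 7), dtype=int)
im[2:5, 2:5] = 1   # a 3x3 square of ones

# Full 3x3 window: diagonal neighbours count, so the corners around the
# square are marked as well.
grad_full = ndimage.morphological_gradient(im, footprint=np.ones((3, 3)))

# 4-connected cross: only axis-aligned neighbours contribute.
cross = ndimage.generate_binary_structure(2, 1)
grad_cross = ndimage.morphological_gradient(im, footprint=cross)

print((grad_full > 0).sum(), (grad_cross > 0).sum())
# The full window marks a larger boundary band than the cross.
assert (grad_full > 0).sum() > (grad_cross > 0).sum()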
Code Example #4
File: styles.py  Project: eraleman/invesalius3
    def expand_watershed(self, pubsub_evt):
        markers = self.matrix
        image = self.viewer.slice_.matrix
        self.viewer.slice_.do_threshold_to_all_slices()
        mask = self.viewer.slice_.current_mask.matrix[1:, 1:, 1:]
        ww = self.viewer.slice_.window_width
        wl = self.viewer.slice_.window_level
        if BRUSH_BACKGROUND in markers and BRUSH_FOREGROUND in markers:
            tmp_image = ndimage.morphological_gradient(get_LUT_value(image, ww, wl).astype('uint16'), self.mg_size)
            tmp_mask = watershed(tmp_image, markers)

            if self.viewer.overwrite_mask:
                mask[:] = 0
                mask[tmp_mask == 1] = 253
            else:
                mask[(tmp_mask==2) & ((mask == 0) | (mask == 2) | (mask == 253))] = 2
                mask[(tmp_mask==1) & ((mask == 0) | (mask == 2) | (mask == 253))] = 253

            #mask[:] = tmp_mask
            self.viewer.slice_.current_mask.matrix[0] = 1
            self.viewer.slice_.current_mask.matrix[:, 0, :] = 1
            self.viewer.slice_.current_mask.matrix[:, :, 0] = 1

            self.viewer.slice_.discard_all_buffers()
            self.viewer.slice_.current_mask.clear_history()
            Publisher.sendMessage('Reload actual slice')
Code Example #5
def plot_wireframe(image, n_bubble=1):
    
    # Position the scan upright, 
    # so the head of the patient would be at the top facing the camera
    #p = image.transpose(2,1,0)
    R = measure.regionprops(image)
    print 'total bubbles', len(R)
    top_n = sorted([(r.area, r.label) for r in R])[::-1][:n_bubble]
    top_n_R = [R[i-1] for a,i in top_n]
    neighborhood = ndimage.morphology.generate_binary_structure(3, 2)

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')
    for r in top_n_R:
        print r.label, r.area
        p = r.image
        outline = ndimage.morphological_gradient(p, structure=neighborhood)
        print outline.shape
        ax.plot_wireframe(np.where(outline))

    ax.set_xlim(0, p.shape[0])
    ax.set_ylim(0, p.shape[1])
    ax.set_zlim(0, p.shape[2])

    plt.show()
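plot_wireframe above needs a labelled 3D image from skimage.measure.regionprops; its essential step is recovering the boundary voxels of a binary 3D region with the morphological gradient. A minimal sketch of that step alone on a synthetic volume (the array and the element choice are illustrative). Note that the sketch passes the binary element via footprint=, i.e. as a flat structuring element; in scipy.ndimage, structure= denotes a non-flat element whose values are added and subtracted, which gives a different result for binary input.

import numpy as np
from scipy import ndimage

# Synthetic binary volume: a solid box.
vol = np.zeros((20, 20, 20), dtype=np.uint8)
vol[5:15, 5:15, 5:15] = 1

# Same neighbourhood as generate_binary_structure(3, 2) above.
neighborhood = ndimage.generate_binary_structure(3, 2)

# Flat morphological gradient: nonzero only in a thin band of voxels
# around the surface of the box.
outline = ndimage.morphological_gradient(vol, footprint=neighborhood) > 0

zs, ys, xs = np.nonzero(outline)   # boundary voxel coordinates
print(outline.sum(), vol.size)     # band size vs. total number of voxels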
Code Example #6
def test_denoise_tv_chambolle_2d():
    # astronaut image
    img = astro_gray.copy()
    # add noise to astronaut
    img += 0.5 * img.std() * np.random.rand(*img.shape)
    # clip noise so that it does not exceed allowed range for float images.
    img = np.clip(img, 0, 1)
    # denoise
    denoised_astro = restoration.denoise_tv_chambolle(img, weight=0.1)
    # which dtype?
    assert_(denoised_astro.dtype in [np.float, np.float32, np.float64])
    from scipy import ndimage as ndi
    grad = ndi.morphological_gradient(img, size=((3, 3)))
    grad_denoised = ndi.morphological_gradient(denoised_astro, size=((3, 3)))
    # test if the total variation has decreased
    assert_(grad_denoised.dtype == np.float)
    assert_(np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()))
Code Example #7
def test_denoise_tv_chambolle_2d():
    # lena image
    img = lena_gray
    # add noise to lena
    img += 0.5 * img.std() * np.random.random(img.shape)
    # clip noise so that it does not exceed allowed range for float images.
    img = np.clip(img, 0, 1)
    # denoise
    denoised_lena = restoration.denoise_tv_chambolle(img, weight=60.0)
    # which dtype?
    assert denoised_lena.dtype in [np.float, np.float32, np.float64]
    from scipy import ndimage

    grad = ndimage.morphological_gradient(img, size=((3, 3)))
    grad_denoised = ndimage.morphological_gradient(denoised_lena, size=((3, 3)))
    # test if the total variation has decreased
    assert grad_denoised.dtype == np.float
    assert np.sqrt((grad_denoised ** 2).sum()) < np.sqrt((grad ** 2).sum()) / 2
Code Example #8
 def test_tv_denoise_2d(self):
     """
     Apply the TV denoising algorithm on the lena image provided
     by scipy
     """
     # lena image
     lena = color.rgb2gray(data.lena())
     # add noise to lena
     lena += 0.5 * lena.std()*np.random.randn(*lena.shape)
     # denoise
     denoised_lena = filter.tv_denoise(lena, weight=60.0)
     # which dtype?
     assert denoised_lena.dtype in [np.float, np.float32, np.float64]
     from scipy import ndimage
     grad = ndimage.morphological_gradient(lena, size=((3,3)))
     grad_denoised = ndimage.morphological_gradient(denoised_lena, size=((3,3)))
     # test if the total variation has decreased
     assert np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum()) / 2
     denoised_lena_int = filter.tv_denoise(lena.astype(np.int32), \
             weight=60.0, keep_type=True)
     assert denoised_lena_int.dtype is np.dtype('int32')
Code Example #9
File: util.py  Project: UriAceves/bp-for-tomo
def compute_best_J(im):
    # A more precise computation of sisj could be done...
    l_x = len(im)
    X, Y = np.ogrid[:l_x, :l_x]
    mask = ((X - l_x/2)**2 + (Y - l_x/2)**2 <= (l_x/2)**2)
    grad = ndimage.morphological_gradient(im, footprint=np.ones((3, 3)))
    sisj_average = 1 - (grad[mask] > 0).mean()
    J1 = np.arctanh(sisj_average)
    grad2 = np.abs(np.diff(im, axis=0))[:, :-1] + np.abs(np.diff(im, axis=1))[:-1]
    sisj_average = 1 - 2*(grad2[mask[:-1, :-1]] > 0).mean()
    J2 = np.arctanh(sisj_average)
    return J1, J2
Code Example #10
File: preprocess.py  Project: wmorrill/kaggle
def separate_lungs(image, return_list=None, iteration=-1):
    """
    This only takes in a 2D slice to make the lung segmentation, and it takes a really
    long time to run. But supposedly it will handle all corner cases. Not sure whether
    the mask from this is very good; it looks like it might be too dilated.

    :param image:
    :param return_list:
    :param iteration:
    :return:
    """
    #Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(
        image)

    #Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)

    #Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)

    #Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)

    #Performing Black-Tophat Morphology for reinclusion
    #Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    #Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)

    #Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    #Close holes in the lungfilter
    #fill_holes is not used here, since in some slices the heart would be reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(lungfilter,
                                                   structure=np.ones((5, 5)),
                                                   iterations=3)

    # #Apply the lungfilter (note the filtered areas being assigned -2000 HU)
    # segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)))
    if iteration >= 0 and return_list:
        return_list[iteration] = lungfilter
    else:
        return lungfilter
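separate_lungs relies on a project-specific generate_markers and on skimage's watershed, so it is not runnable as shown. The step that actually uses morphological_gradient, reducing the watershed label image to its outline, can be demonstrated in isolation; a minimal sketch on a hand-made label image (values and shape are illustrative):

import numpy as np
from scipy import ndimage

# Toy "watershed result": two labelled regions side by side.
labels = np.ones((6, 8), dtype=np.int32)
labels[:, 4:] = 2

# The morphological gradient of a label image is nonzero exactly where a
# pixel's 3x3 neighbourhood contains more than one label, i.e. on the
# boundary between regions.
outline = ndimage.morphological_gradient(labels, size=(3, 3))
outline = outline.astype(bool)

print(outline.astype(int))   # True only in the two columns along the 1/2 border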
Code Example #11
File: dsbowl_utils.py  Project: ericsolo/python
def get_segmented_lungs(image):
    #Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(image)
    
    #Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    
    #Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    
    #Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3,3))
    outline = outline.astype(bool)
    
    #Performing Black-Tophat Morphology for reinclusion
    #Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    #blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 14) # <- retains more of the area, 12 works well. Changed to 14, 12 still excluded some parts.
    #Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    
    #Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    #Close holes in the lungfilter
    #fill_holes is not used here, since in some slices the heart would be reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=np.ones((5,5)), iterations=3)
    
    #Apply the lungfilter (note the filtered areas being assigned threshold_min HU)
    segmented = np.where(lungfilter == 1, image, threshold_min*np.ones(image.shape))
    
    #return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
    return segmented
Code Example #12
File: styles.py  Project: eraleman/invesalius3
    def OnBrushRelease(self, evt, obj):
        n = self.viewer.slice_data.number
        self.viewer.slice_.discard_all_buffers()
        if self.orientation == 'AXIAL':
            image = self.viewer.slice_.matrix[n]
            mask = self.viewer.slice_.current_mask.matrix[n+1, 1:, 1:]
            self.viewer.slice_.current_mask.matrix[n+1, 0, 0] = 1
            markers = self.matrix[n]

        elif self.orientation == 'CORONAL':
            image = self.viewer.slice_.matrix[:, n, :]
            mask = self.viewer.slice_.current_mask.matrix[1:, n+1, 1:]
            self.viewer.slice_.current_mask.matrix[0, n+1, 0] = 1
            markers = self.matrix[:, n, :]

        elif self.orientation == 'SAGITAL':
            image = self.viewer.slice_.matrix[:, :, n]
            mask = self.viewer.slice_.current_mask.matrix[1: , 1:, n+1]
            self.viewer.slice_.current_mask.matrix[0, 0, n+1] = 1
            markers = self.matrix[:, :, n]


        ww = self.viewer.slice_.window_width
        wl = self.viewer.slice_.window_level
        
        if BRUSH_BACKGROUND in markers and BRUSH_FOREGROUND in markers:
            tmp_image = ndimage.morphological_gradient(get_LUT_value(image, ww, wl).astype('uint16'), self.mg_size)
            tmp_mask = watershed(tmp_image, markers)

            if self.viewer.overwrite_mask:
                mask[:] = 0
                mask[tmp_mask == 1] = 253
            else:
                mask[(tmp_mask==2) & ((mask == 0) | (mask == 2) | (mask == 253))] = 2
                mask[(tmp_mask==1) & ((mask == 0) | (mask == 2) | (mask == 253))] = 253


            self.viewer.slice_.current_mask.was_edited = True
            self.viewer.slice_.current_mask.clear_history()
            Publisher.sendMessage('Reload actual slice')
        else:
            self.viewer.OnScrollBar(update3D=False)
Code Example #13
def get_training_data( file_img, file_mask, r ):
    # create mask
    input_mask = irtk.imread( file_mask )
    x_min, y_min, z_min, x_max, y_max, z_max = (input_mask == 0).bbox()

    background = irtk.zeros( input_mask.get_header(), dtype='uint8' )
    background[z_min:z_max+1,
               y_min:y_max+1,
               x_min:x_max+1] = 1
    background = nd.morphological_gradient( background, size=7)
    n = background[z_min+1:z_max,
                   y_min+1:y_max,
                   x_min+1:x_max].sum()
    z = np.random.randint(low=0, high=input_mask.shape[0], size=int(1.25 * n))
    y = np.random.randint(low=0, high=input_mask.shape[1], size=int(1.25 * n))
    x = np.random.randint(low=0, high=input_mask.shape[2], size=int(1.25 * n))
    background[z,y,x] = 1
    background[z_min+1:z_max,
               y_min+1:y_max,
               x_min+1:x_max] = 0
    
    foreground = (input_mask == 1).astype('uint8')

    new_mask = irtk.zeros( input_mask.get_header(), dtype='uint8' )
    new_mask[foreground == 1] = 1
    new_mask[background != 0] = 2

    img = irtk.imread( file_img, dtype='float32' )
    
    X = []
    Y = []

    for z in xrange(img.shape[0]):
        YX = np.transpose( np.nonzero( foreground[z] ) )
        if DEBUG:
            YX = YX[::10]
        else:
            YX = YX[::2]
        if YX.shape[0] == 0:
            continue
        patches = extract_patches2D( img[z], r, YX )
        patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
        print patches.shape, YX.shape
        X.extend( patches )
        Y.extend( [1]*len(YX) )

    for z in xrange(img.shape[0]):
        YX = np.transpose( np.nonzero( background[z] ) )
        if DEBUG:
            YX = YX[::10]
        else:
            YX = YX[::2]
        if YX.shape[0] == 0:
            continue
        patches = extract_patches2D( img[z], r, YX )
        patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )
        print patches.shape, YX.shape
        X.extend( patches )
        Y.extend( [0]*len(YX) )

    return X, Y
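get_training_data depends on the irtk package, but the background-sampling trick is generic: the morphological gradient of a filled binary box, taken with a large window (size=7 above), is a shell of nonzero voxels straddling the box border, from which negative samples are then drawn. A 2D sketch of just that shell construction (array shape and window size are illustrative):

import numpy as np
from scipy import ndimage as nd

# Filled binary rectangle standing in for the bounding-box mask above.
background = np.zeros((40, 40), dtype='uint8')
background[10:30, 10:30] = 1

# With a 7x7 window the gradient is 1 in a band roughly three pixels wide
# on each side of the rectangle border, and 0 everywhere else.
shell = nd.morphological_gradient(background, size=7)

print(shell[20, 5:15])   # zeros outside, a run of ones across the border, zeros inside
print(shell.sum())       # number of pixels in the shell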
Code Example #14
def mask_image( file_img, file_mask, ga, r, neigh, output_dir ):
    img = irtk.imread( file_img, dtype='float32' )

    input_mask = irtk.imread( file_mask )
    
    print "predicting..."
    res = irtk.zeros( img.get_header(), dtype='float32' )
    res2 = irtk.zeros( img.get_header(), dtype='float32' )
    res3 = irtk.zeros( img.get_header(), dtype='float32' )
    res4 = irtk.zeros( img.get_header(), dtype='uint8' )
    mask = irtk.ones( input_mask.get_header(), dtype='uint8' )
    mask[input_mask == 2] = 0
    for z in xrange(img.shape[0]):
        print z
        YX = np.transpose( np.nonzero( mask[z] ) )
        if YX.shape[0] == 0:
            continue # this slice does not intersect the box
        patches = extract_patches2D( img[z], r, YX )
        patches = np.reshape( patches, (patches.shape[0],patches.shape[1]*patches.shape[2]) )

        predictions = neigh.predict_proba(patches)[:,1]
        res[z,YX[:,0],YX[:,1]] = predictions

    x_min, y_min, z_min, x_max, y_max, z_max = mask.bbox()

    proba = res[z_min:z_max+1,
                y_min:y_max+1,
                x_min:x_max+1]

    if args.mass:
        BV = get_BV( args.ga )
        box_volume = (z_max-z_min)*img.header['pixelSize'][2]*(y_max-y_min)*img.header['pixelSize'][1]*(x_max-x_min)*img.header['pixelSize'][0]
        ratio = float(BV) / float(box_volume)
        print "ratio", ratio
        q0,q1 = mquantiles( proba.flatten(), prob=[0.5*(1.0-ratio),
                                                   1.0-0.5*ratio] )
        print "threshold", q0,q1
        #threshold = max(0.5,threshold)
    
        # labels = res[z_min:z_max+1,
        #              y_min:y_max+1,
        #              x_min:x_max+1] > threshold
        
    #res = 1 / (np.exp(-(res-threshold)/(res.max()-res.min())))

        res[res<q0] = q0
        res[res>q1] = q1
        res -= res.min()
        res /= res.max()

    labels = res[z_min:z_max+1,
                 y_min:y_max+1,
                 x_min:x_max+1] > 0.5
   
    proba = res[z_min:z_max+1,
                y_min:y_max+1,
                x_min:x_max+1]
    
    cropped_img = img[z_min:z_max+1,
                      y_min:y_max+1,
                      x_min:x_max+1]

    if args.do_3D:
        labels = irtk.crf( cropped_img,
                           labels,
                           proba,
                           l=args.l,
                           sigma=get_noiseXY(cropped_img),
                           sigmaZ=get_noiseZ(cropped_img) )
    # elif args.do_patchZ:
    #     labels = irtk.crf_patchZ( cropped_img,
    #                               labels,
    #                               proba,
    #                               l=10.0 )   
    # else:
    #     for z in xrange(z_min,z_max+1):
    #         labels[z] = irtk.crf( cropped_img[z],
    #                               labels[z],
    #                               proba[z],
    #                               l=1.0 )

    print "MAX LABEL:", labels.max()
    irtk.imwrite(output_dir + "/bare_"+os.path.basename(file_img), labels )
    tmp = irtk.zeros( img.get_header(), dtype='uint8' )
    tmp[z_min:z_max+1,
        y_min:y_max+1,
        x_min:x_max+1] = labels
    ( min_x_bare, min_y_bare, min_z_bare,
      max_x_bare, max_y_bare, max_z_bare ) = tmp.bbox()
    
    if not args.no_cleaning:
        # clean by fitting ellipses enlarged by 10%
        for z in xrange(labels.shape[0]):
            edges = nd.morphological_gradient( labels[z] > 0,size=5 )
            points = np.transpose(edges.nonzero())[:,::-1]
            if len(points) == 0:
                continue
            points = np.array(map(lambda x:[x],points),dtype='int32')
            ellipse = cv2.fitEllipse(points)
            cv2.ellipse( labels[z], (ellipse[0],
                                     (1.1*ellipse[1][0],1.1*ellipse[1][1]),
                                     ellipse[2]) , 1, -1 )

    irtk.imwrite(output_dir + "/seg_"+os.path.basename(file_img), labels )
    irtk.imwrite(output_dir + "/res_"+os.path.basename(file_img), res )

    # re-read the image in case we processed it
    img = irtk.imread( file_img, dtype='float32' )
    cropped_img = img[z_min:z_max+1,
                      y_min:y_max+1,
                      x_min:x_max+1]
    cropped_img[labels==0] = -1
    masked = cropped_img.bbox(crop=True)
    irtk.imwrite(output_dir + "/masked_"+os.path.basename(file_img), masked )

    # re-read the image in case we processed it
    img = irtk.imread( file_img, dtype='float32' )    
    x0 = min_x_bare + (max_x_bare - min_x_bare) / 2
    y0 = min_y_bare + (max_y_bare - min_y_bare) / 2
    ofd = get_OFD(ga)/img.header['pixelSize'][0]

    cropped_img = img[min_z_bare:max_z_bare+1,
                      max(0,int(round(y0-ofd/2))):min(img.shape[1],int(round(y0+ofd/2+1))),
                      max(0,int(round(x0-ofd/2))):min(img.shape[2],int(round(x0+ofd/2+1)))].copy()

    irtk.imwrite(output_dir + "/very_large_"+os.path.basename(file_img),
                 cropped_img )
    
    cropped_proba = res[min_z_bare:max_z_bare+1,
                        max(0,int(round(y0-ofd/2))):min(img.shape[1],int(round(y0+ofd/2+1))),
                        max(0,int(round(x0-ofd/2))):min(img.shape[2],int(round(x0+ofd/2+1)))].copy()

    irtk.imwrite(output_dir + "/proba_"+os.path.basename(file_img),
                 cropped_proba )    
Code Example #15
File: hw3.py  Project: crbates/ay250
def extract_features(image_path_list):
    '''
    This function takes a list of image paths and computes several features of each
    image in order to classify them. This includes both basic properties, like size
    and aspect ratio, as well as object recognition and counting. It runs at about
    1 image/s/core on a quad-core 2.93 GHz i7 with HT.
    '''
    feature_list = []
    name_list = []
    file_list = []
    #iterate through all the image paths
    for image_path in image_path_list:
        image_array = imread(image_path)
        feature = []
        feature.append(image_array.size)
        shape = image_array.shape
        # check if the image is black or white
        if len(shape) > 2:
            feature.append(1)
            #convert color images to grey so they can be compared with greyscale ones
            image_array = color.rgb2grey(image_array)
            '''
            # Can't use these because there is nothing comparable for black and
            # white images
            feature.append(sum(sum(image_array[:,:,0])))        
            feature.append(sum(sum(image_array[:,:,1])))
            feature.append(sum(sum(image_array[:,:,2])))
            hsv = color.rgb2hsv(img_as_float(image_array))              
            feature.append(sum(sum(hsv[:,:,0])))        
            feature.append(sum(sum(hsv[:,:,1])))        
            feature.append(sum(sum(hsv[:,:,2])))
            '''
        else:
            feature.append(0)
            #print "bw: ", image_path
        #determine basic image shape properties
        feature.append(shape[0])
        feature.append(shape[1])
        feature.append(shape[0]/shape[1])
        #compute the amount of different shades of grey and their ratios
        black = np.average(image_array.flat <= 0.25 )
        darkgrey = np.average((image_array.flat > 0.25) & (image_array.flat <= 0.5))
        lightgrey = np.average((image_array.flat > 0.5) & (image_array.flat <= 0.75)) 
        white = np.average(image_array.flat > 0.75)
        feature.append(black)
        feature.append(darkgrey)
        feature.append(lightgrey)
        feature.append(white)
        feature.append(black/(white+1))
        feature.append(lightgrey/(darkgrey+1))
        feature.append(lightgrey/(black+1))
        
        # compute the average of several common filter outputs
        feature.append(np.average(flt.sobel(image_array)))
        feature.append(np.average(ndimage.morphological_gradient(image_array, size=(2,2))))
        feature.append(np.average(flt.prewitt(image_array)))
        feature.append(np.average(flt.canny(image_array)))
        
        #Use the canny filter to delineate object and then count the objects and 
        #their average size
        p = flt.canny(image_array)
        #plt.imshow(p,cmap=plt.cm.gray,interpolation='nearest')
        #plt.show()
        labels, count = ndimage.label(p)
        area = np.sum((labels>0))/count        
        feature.append(area)
        feature.append(count)
        
        #determine the filename for the results file and the image type for the 
        #training set.
        filename = image_path.split("/")[-1]
        file_list.append(filename)
        image = filename.split('_')[0]        
        name_list.append(image)        
        feature_list.append(feature)
    return name_list, feature_list, file_list
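In extract_features, the morphological gradient contributes a single scalar feature: its mean over the image, which acts as a rough edge-density / contrast measure. A small self-contained sketch of that feature on a bundled scikit-image test image (the image choice and the sigma value are illustrative):

import numpy as np
from scipy import ndimage
from skimage import data
from skimage.util import img_as_float

image_array = img_as_float(data.camera())   # grayscale test image in [0, 1]

# Mean morphological gradient over a 2x2 window: higher values mean more
# (or stronger) intensity transitions, i.e. a busier image.
edge_density = np.average(ndimage.morphological_gradient(image_array, size=(2, 2)))
print(edge_density)

# A blurred copy of the same image has weaker local transitions,
# so the feature value drops.
blurred = ndimage.gaussian_filter(image_array, sigma=3)
assert np.average(ndimage.morphological_gradient(blurred, size=(2, 2))) < edge_density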