Example #1
 def imstretch(self):
     data = np.clip(self.data_array, self.threshold[0], self.threshold[1])
     if self.mode == "linear":
         pass
     elif self.mode == "logarithmic":
         data = np.reciprocal(1 + np.power(0.5 / data, self.factor))
     elif self.mode == "gamma":
         data = np.power(data, self.factor)
     elif self.mode == "arcsinh":
         mn = np.nanmin(data)
         mx = np.nanmax(data)
         tmp = bytescale(data, high=1.0)
         beta = np.clip(self.factor, 0.0, self.factor)
         sclbeta = (beta - mn) / (mx - mn)
         sclbeta = np.clip(sclbeta, 1.0e-12, sclbeta)
         nonlinearity = 1.0 / sclbeta
         extrema = np.arcsinh(np.array([0.0, nonlinearity]))
         data = np.clip(np.arcsinh(data * nonlinearity), extrema[0], extrema[1])
     elif self.mode == "square root":
         data = np.sqrt(np.fabs(data)) * np.sign(data)
     elif self.mode == "histogram equalization":
         imhist, bins = np.histogram(data.flatten(), 256, density=True)
         cdf = imhist.cumsum()  # cumulative distribution function
         cdf = 255 * cdf / cdf[-1]  # normalize
         im2 = np.interp(data.flatten(), bins[:-1], cdf)
         data = im2.reshape(data.shape)
     self.scaled = bytescale(data).flatten().tolist()
Example #2
 def test_bytescale_keywords(self):
     x = np.array([40, 60, 120, 200, 300, 500])
     res_lowhigh = misc.bytescale(x, low=10, high=143)
     assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143])
     res_cmincmax = misc.bytescale(x, cmin=60, cmax=300)
     assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255])
     assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4])
Example #3
 def test_bytescale(self):
     x = np.array([0, 1, 2], np.uint8)
     y = np.array([0, 1, 2])
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         assert_equal(misc.bytescale(x), x)
         assert_equal(misc.bytescale(y), [0, 128, 255])
Example #4
 def test_bytescale_keywords(self):
     x = np.array([40, 60, 120, 200, 300, 500])
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         res_lowhigh = misc.bytescale(x, low=10, high=143)
         assert_equal(res_lowhigh, [10, 16, 33, 56, 85, 143])
         res_cmincmax = misc.bytescale(x, cmin=60, cmax=300)
         assert_equal(res_cmincmax, [0, 0, 64, 149, 255, 255])
         assert_equal(misc.bytescale(np.array([3, 3, 3]), low=4), [4, 4, 4])
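The mapping these tests exercise is a linear rescale of [cmin, cmax] onto [low, high], rounded half-up and cast to uint8. A minimal sketch that reproduces the expected values above (bytescale was removed from SciPy in 1.2.0, so this is a stand-in, not the library source):

import numpy as np

def bytescale_sketch(data, cmin=None, cmax=None, high=255, low=0):
    # linear map [cmin, cmax] -> [low, high], then round half-up into uint8
    data = np.asarray(data)
    if data.dtype == np.uint8:
        return data  # uint8 input is returned unchanged
    cmin = data.min() if cmin is None else cmin
    cmax = data.max() if cmax is None else cmax
    cscale = cmax - cmin
    if cscale == 0:
        cscale = 1  # degenerate range: everything lands on `low`
    scaled = (data - cmin) * (float(high - low) / cscale) + low
    return (scaled.clip(low, high) + 0.5).astype(np.uint8)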
Example #5
    def getProjection(self, numFrame):
        """ Get projections onto yoz and xoz plane from xoy image """
        
        xoyImg, _ = self.getGestureRegion(numFrame)
        
        heightLen, widthLen = xoyImg.shape
        depthLen = 256
        size = xoyImg.size
        
        corMap = np.zeros((size, 3))
        corX = np.reshape(corMap[:, 0], (size, 1))
        corY = np.reshape(corMap[:, 1], (size, 1))
        corZ = np.reshape(xoyImg, (size, 1), 'F')   # using Fortran-like index order
        
        # generate x and y coordinates
        for i in range(widthLen):
            startIdx = i * heightLen
            endIdx = startIdx + heightLen
            
            corX[startIdx : endIdx] = np.ones((heightLen, 1)) * i
            tmpArray = np.array(range(0, heightLen))    # generate matrix [0-480]
            corY[startIdx : endIdx] = np.reshape(tmpArray, (tmpArray.size, 1))
            
#         corMap[:, 0] = corX
#         corMap[:, 1] = corY
#         corMap[:, 2] = corZ
        
        corMap = np.hstack([corX, corY, corZ])
        
        ##
        thresh = 10
        selectedCorMap = corMap[np.where(corMap[:, 2] > thresh)]
        selectedCorMap = selectedCorMap.astype(int)
        
        # yoz and xoz image
        xozImg = np.zeros((depthLen, widthLen))
        yozImg = np.zeros((heightLen, depthLen))
        
        rowNum, _ = selectedCorMap.shape
        
        for i in range(rowNum):
            xozImg[selectedCorMap[i, 2], selectedCorMap[i, 0]] = xozImg[selectedCorMap[i, 2], selectedCorMap[i, 0]] + 1
            yozImg[selectedCorMap[i, 1], selectedCorMap[i, 2]] = yozImg[selectedCorMap[i, 1], selectedCorMap[i, 2]] + 1
        
        xozImg = bytescale(xozImg)  # scale to 0-255
        yozImg = bytescale(yozImg)  # scale to 0-255
        
        return xozImg, yozImg
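The coordinate-building loop above can be replaced by a single vectorized construction. A sketch under the same Fortran-order flattening convention used for corZ (variable names follow the method above):

import numpy as np

# row (y) and column (x) index of every pixel, flattened column-major so the
# layout matches np.reshape(xoyImg, (size, 1), 'F')
ys, xs = np.mgrid[0:heightLen, 0:widthLen]
corMap = np.column_stack([xs.ravel(order='F'),
                          ys.ravel(order='F'),
                          xoyImg.ravel(order='F')])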
Example #6
 def test_bytescale_rounding(self):
     a = np.array([-0.5, 0.5, 1.5, 2.5, 3.5])
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         actual = misc.bytescale(a, cmin=0, cmax=10, low=0, high=10)
     expected = [0, 1, 2, 3, 4]
     assert_equal(actual, expected)
Example #7
 def test_bytescale_low_equals_high(self):
     a = np.arange(3)
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         actual = misc.bytescale(a, low=10, high=10)
     expected = [10, 10, 10]
     assert_equal(actual, expected)
Example #8
 def test_bytescale_cscale_lowhigh(self):
     a = np.arange(10)
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         actual = misc.bytescale(a, cmin=3, cmax=6, low=100, high=200)
     expected = [100, 100, 100, 100, 133, 167, 200, 200, 200, 200]
     assert_equal(actual, expected)
Example #9
def pibayerraw(fn,exposure_sec,bit8):
    with PiCamera() as cam: #load camera driver
        print('camera startup gain autocal')
        #LED automatically turns on, this turns it off
        cam.led = False
        sleep(0.75) # somewhere between 0.5..0.75 seconds to let camera settle to final gain value.
        setparams(cam,exposure_sec) #wait till after sleep() so that gains settle before turning off auto
        getparams(cam)
        counter = 1
#%% main loop
        while True:
#            tic = time()
            img10 = grabframe(cam)
#            print('{:.1f} sec. to grab frame'.format(time()-tic))
#%% linear scale 10-bit to 8-bit
            if bit8:
                img = bytescale(img10,0,1024,255,0)
            else:
                img = img10
#%% write to PNG or JPG or whatever based on file extension
            max_value = img.max()
            print(max_value)
            if max_value > 50:
                idx = unravel_index(img.argmax(), img.shape)
                xidx = idx[0]
                yidx = idx[1]
                print(xidx, yidx)
                xlow = max(0, xidx-25)
                ylow = max(0, yidx-25)
                xhi = min(1944, xidx+25)
                yhi = min(2592, yidx+25)
                imsave(fn+'%03d' % counter + '.png',img[xlow:xhi,ylow:yhi])
                counter = counter + 1
#                break
    return img
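Note the all-positional call bytescale(img10, 0, 1024, 255, 0): with the signature bytescale(data, cmin, cmax, high, low), it is equivalent to the keyword form below, which makes the intended 10-bit to 8-bit mapping explicit:

img = bytescale(img10, cmin=0, cmax=1024, high=255, low=0)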
Example #10
def make_pc_images(pca, shape):

    U = REACT2D.build_dct(shape[0], shape[1], 50)

    pca_images = np.empty((npca, shape[0], shape[1], 3))

    pca_images[:, :, :, 0] = pca.components_[:, :ncoefs].dot(U.T[:ncoefs, :]).reshape((npca, shape[0], shape[1]))
    pca_images[:, :, :, 1] = pca.components_[:, ncoefs:2*ncoefs].dot(U.T[:ncoefs, :]).reshape((npca, shape[0], shape[1]))
    pca_images[:, :, :, 2] = pca.components_[:, 2*ncoefs:].dot(U.T[:ncoefs, :]).reshape((npca, shape[0], shape[1]))

    npca_rows = 3
    npca_cols = 3
    nplots = 2

    pca_idx = 0
    for plot in range(nplots):
        idx = 1
        plt.clf()
        for row in range(npca_rows):
            for col in range(npca_cols):
                print(row, col, idx)
                plt.subplot(npca_rows, npca_cols, idx)
                plt.imshow(bytescale(pca_images[pca_idx, :, :, :]))
                plt.title('PC ' + str(pca_idx + 1))
                plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
                plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
                idx += 1
                pca_idx += 1
        plt.savefig(plot_dir + 'PC_Images_' + str(plot + 1) + '.png')
        if doshow:
            plt.show()
Example #11
 def test_bytescale_mask(self):
     a = np.ma.MaskedArray(data=[1, 2, 3], mask=[False, False, True])
     actual = misc.bytescale(a)
     expected = [0, 255, 3]
     assert_equal(expected, actual)
     assert_mask_equal(a.mask, actual.mask)
     self.assertTrue(isinstance(actual, np.ma.MaskedArray))
Example #12
File: waves.py Project: HELIO-HFC/RABAT3
     def write_img(self,filename=None,
                   date=None,receiver=None,
                   data=None,
                   format='jpg',quality=80,
                   data_directory=None,
                   output_filename=None,
                   output_directory=None,
                   min_val=None,max_val=None,
                   verbose=True,greyscale=True,
                   reverse_color=True,
                   download_file=False,
                   delete_file=False,
                   prep=False):
          """
          Write output image file containing the dynamical spectrum.
          """

          if (greyscale):
               mode='L'
          else:
               mode='RGB'

          ext = format.lower()

          if (data is None):
               data = self.get_data(date=date,receiver=receiver,
                                    filename=filename,
                                    data_directory=data_directory,
                                    download_file=download_file,
                                    delete_file=delete_file,
                                    verbose=verbose,prep=prep)
          if (data is None):
               return ""

          array = data.intensity
          if (min_val is None): min_val = array.min()
          if (max_val is None): max_val = array.max()

          array = array.clip(min_val,max_val)
          if not ("(db)" in data.intensity_units.lower()):
               array = to_dB(array)

          array = bytescale(array)
          if (reverse_color):
               array = array.max() - array
          image = Image.fromarray(array,mode=mode)

          if (output_filename is None):
               if (filename is None):
                    filename = self.get_filename(date,receiver=receiver)
               output_filename = os.path.basename(filename)+"."+ext

          if (output_directory is None):
               output_path = output_filename
          else:
               output_path = os.path.join(output_directory,os.path.basename(output_filename))

          image.save(output_path,quality=quality)

          return output_path
Example #13
def hdf2video(data,imgh5,outfn,clim):
    outfn = Path(outfn).expanduser()

    import cv2
    try:
        from cv2.cv import FOURCC as fourcc #Windows needs from cv2.cv
    except ImportError:
        from cv2 import VideoWriter_fourcc as fourcc

    outfn = outfn.with_suffix('.ogv')
    cc4 = fourcc(*'THEO')
    # we use isColor=True because some codecs have trouble with grayscale
    hv = cv2.VideoWriter(str(outfn),cc4, fps=33,
                         frameSize=data.shape[1:][::-1],  #frameSize needs col,row
                         isColor=True) #right now we're only using grayscale
    if not hv.isOpened():
        raise TypeError('trouble starting video file')

    for d in data:
        #RAM usage explodes if scaling all at once on GB class file
    #for d in bytescale(data,1000,4000):
    #for d in sixteen2eight(data,(1000,4000)):
        hv.write(gray2rgb(bytescale(d,clim[0],clim[1])))

    hv.release()
Example #14
 def getGestureRegion(self, frameNum):
     """ Get gesture region for the given frame """
     # get Depth frame
     depthData = self.getFrame(self.depth, frameNum)
     depthGray = cv2.cvtColor(depthData, cv2.COLOR_RGB2GRAY)
     
     # get user segmentation frame
     userSeg = self.getFrame(self.user, frameNum)
     userSegGray = cv2.cvtColor(userSeg, cv2.COLOR_RGB2GRAY)
     userSegGray = cv2.medianBlur(userSegGray, 5)    # Median filter on original user image
     
     # Convert user to binary image
     threshold = 128
     _, userBinImg = cv2.threshold(userSegGray, threshold, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
     
     depthGray[np.where(userBinImg == 0)] = 0
     depthGray = cv2.medianBlur(depthGray, 5)
     
     depthRealValue = depthGray.astype(np.float32) # depth value of real world (0-maxDepth)
     # Convert to depth values
     depthRealValue = depthRealValue / 255.0 * float(self.data['maxDepth'])
     depthRealValue = depthRealValue.round()
     depthRealValue = depthRealValue.astype(np.uint16)
     
     # scale depthGray to 0-255
     depthGray = depthGray.astype(np.uint16)
     depthGray = bytescale(depthGray)
     depthImgValue = np.copy(depthGray)  
 
     return depthImgValue, depthRealValue
Example #15
def main(image):

    matplotlib.rcParams["font.size"] = 10

    def show_img(img, axes):
        """Plot the image as float"""
        # img = img_as_float(img)
        ax_img = axes
        ax_img.imshow(img, cmap=plt.cm.gray)
        ax_img.set_axis_off()

        return ax_img

    # Open and read in the fits image
    try:
        fits = pyfits.open(image)
        # fits = Image.open(image)
    except IOError:
        print("Cannot read the fits image: " + image + " !!")
        return

    # Check the input image
    img = fits[0].data
    # img = np.array(fits)
    if img.ndim != 2:
        raise NameError("Data need to be 2-D image !")

    # Logarithmic scaling of the image
    img_log = np.log10(img)
    img_log = img_as_float(img_log)

    # Contrast stretching
    p2, p98 = np.percentile(img, (2, 98))
    img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))

    # Adaptive equalization
    img_new = bytescale(img_rescale)
    img_ahe = exposure.equalize_adapthist(img_new, ntiles_x=16, ntiles_y=16, clip_limit=0.05, nbins=256)
    img_ahe = img_as_float(img_ahe)

    # Display results
    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 5))

    # Original image
    ax_img = show_img(img_log, axes[0])
    ax_img.set_title("Original")

    # Contrast Enhanced one
    ax_img = show_img(img_rescale, axes[1])
    ax_img.set_title("Rescale")

    # AHE Enhanced one
    ax_img = show_img(img_ahe, axes[2])
    ax_img.set_title("AHE")

    # Prevent overlap of y-axis
    fig.subplots_adjust(bottom=0.1, right=0.9, top=0.9, left=0.1, wspace=0.05)

    # Save a PNG file
    plt.gcf().savefig("ahe_test.png")
Example #16
def get_galaxy_images(galaxy_id):
    images = []
    for c in range(3):
        fname = base_dir + 'data/images_training_rev1/' + str(galaxy_id) + '_' + str(c) + '.npy'
        images.append(np.load(fname))
    images = np.dstack(images)

    return bytescale(images)  # scale the arrays for nice color representation of the image
Example #17
File: ndvi.py Project: cappelaere/coreg2
def linear_stretch(data, min_percentile=1.0, max_percentile=97.0):
    pmin, pmax = numpy.percentile(data[numpy.nonzero(data)], (min_percentile, max_percentile))

    data[data > pmax] = pmax
    data[data < pmin] = pmin

    bdata = misc.bytescale(data)
    return bdata
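The two masked assignments above are just a clip to the percentile range; an equivalent one-liner:

data = numpy.clip(data, pmin, pmax)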
Example #18
def save_image(filename, im, scaling='auto', depth=8):
    """Save an ndarray or image as a tiff.

    Parameters
    ----------
    im : ndarray or :class:`holopy.image.Image`
        image to save.
    filename : basestring
        filename in which to save image. If im is an image the
        function should default to the image's name field if no
        filename is specified
    scaling : 'auto', None, or (None|Int, None|Int)
        How the image should be scaled for saving. Ignored for float
        output. It defaults to auto, use the full range of the output
        format. Other options are None, meaning no scaling, or a pair
        of integers specifying the values which should be set to the
        maximum and minimum values of the image format.
    depth : 8, 16 or 'float'
        What type of image to save. Options other than 8bit may not be supported
        for many image types. You probably don't want to save 8bit images without
        some kind of scaling.

    """
    # if we don't have an extension, default to tif
    if os.path.splitext(filename)[1] == '':
        filename += '.tif'

    # to replicate old behavior from using sp.misc.toimage
    if depth == 8:
        if scaling == 'auto':
            cmin, cmax = None, None
        else:
            cmin, cmax = scaling
        im = bytescale(im, cmin=cmin, cmax=cmax)
    elif depth != 'float':
        if scaling is not None:
            if scaling == 'auto':
                min = im.min()
                max = im.max()
            elif len(scaling) == 2:
                min, max = scaling
            else:
                raise Error("Invalid image scaling")
            if min is not None:
                im = im - min
            if max is not None:
                im = im * ((2**depth-1)/max)
        if depth == 8:
            im = (im+.4999).astype('uint8')
        elif depth == 16:
            # PIL can't handle uint16, but seems to do the right thing
            # with int16, so go ahead and use it
            im = (im+.4999).astype('int16')
        else:
            raise Error("Unknown image depth")

    PILImage.fromarray(im).save(filename, autoscale=False)
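A usage sketch for the non-8-bit path (the filename and the 12-bit range are illustrative assumptions, not from the source):

# map raw counts 0..4095 of a 12-bit frame onto the full 16-bit output range
save_image('frame', raw_frame, scaling=(0, 4095), depth=16)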
Example #19
def create_composite(red, green, blue):
    img_dim = red.shape
    img = np.zeros((img_dim[0], img_dim[1], 3), dtype=np.float64)
    img[:,:,0] = red
    img[:,:,1] = green
    img[:,:,2] = blue
    p2, p98 = np.percentile(img, (2, 98))
    img_rescale = exposure.rescale_intensity(img, in_range=(0, p98))
    return bytescale(img_rescale)
Example #20
 def test_bytescale_mask(self):
     a = np.ma.MaskedArray(data=[1, 2, 3], mask=[False, False, True])
     with suppress_warnings() as sup:
         sup.filter(DeprecationWarning)
         actual = misc.bytescale(a)
     expected = [0, 255, 3]
     assert_equal(expected, actual)
     assert_mask_equal(a.mask, actual.mask)
     assert_(isinstance(actual, np.ma.MaskedArray))
Example #21
def rgb(hh, hv, spath):
  hh = np.flipud(hh)
  hv = np.flipud(hv)
  
  hhhv = hh/hv
  
  plt.figure()
  m.imshow(hhhv, vmin = 0., vmax = 1)
  m.drawrivers(color = 'black', linewidth = 1.)
  m.drawstates(color = 'black', linewidth = 1.5)
  m.drawparallels(parallels, linewidth = 0.0, labels = [1,0,0,0], fontsize=10)
  m.drawmeridians(meridians, linewidth = 0.0, labels = [0,0,0,1], fontsize=10)
  cbar = m.colorbar()
  cbar.set_label('')
  title0 = 'ALOS-1 HH/HV'
  plt.title(title0)
  plt.savefig(spath + 'hhhv.png', transparent = False, bbox_inches = 'tight', dpi = 300)
  plt.close()
  
 
  r = bytescale(hh, cmin = -30, cmax = -10)
  g = bytescale(hh, cmin = -30, cmax = -10)   # note: the green channel is also built from hh, not hv
  b = bytescale(hhhv, cmin = 0.5, cmax = 1)
 
  rgb1 = np.zeros( (rows,cols,3), 'uint8' )
  rgb1[...,0] = r
  rgb1[...,1] = g
  rgb1[...,2] = b
 
  plt.figure()
  m.imshow(rgb1)
  m.drawrivers(color = 'black', linewidth = 1.)
  m.drawstates(color = 'black', linewidth = 1.5)
  m.drawparallels(parallels, linewidth = 0.0, labels = [1,0,0,0], fontsize=10)
  m.drawmeridians(meridians, linewidth = 0.0, labels = [0,0,0,1], fontsize=10)
  #cbar = m.colorbar()
  #cbar.set_label('')
  title0 = 'ALOS-1 RGB'
  plt.title(title0)
  plt.savefig(spath + 'rgb1.png', transparent = False, bbox_inches = 'tight', dpi = 300)
  plt.close()
  return hhhv
Example #22
File: node.py Project: jlaura/autocnet
    def get_array(self, band=1):
        """
        Get a band as a byte-scaled (uint8) numpy array

        Parameters
        ----------
        band : int
               The band to read, default 1
        """

        array = self.geodata.read_array(band=band)
        return bytescale(array)
Example #23
def sdo_aia_scale(image=None, wavelength=None):
    '''
    rescale the aia image
    :param image: normalised aia image data
    :param wavelength:
    :return: byte scaled image data
    '''
    from scipy.misc import bytescale
    clrange = sdo_aia_scale_dict(wavelength)
    image[image > clrange['high']] = clrange['high']
    image[image < clrange['low']] = clrange['low']
    image = np.log10(image)
    return bytescale(image)
Example #24
 def data_to_bytescale_rgb(
         data):  # used to create the SOURCE PNGs (MRI, FA, MD)
     """
     Converts a single-channel grayscale image to a 3-channel image that can
     then be saved as a PNG
     """
     im = bytescale(data)
     w, h = im.shape
     ret = np.empty((w, h, 3), dtype=np.uint8)
     ret[:, :, 0] = im
     ret[:, :, 1] = im
     ret[:, :, 2] = im
     return ret
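The three identical channel assignments can also be written as one stack; a sketch:

ret = np.stack([im, im, im], axis=-1)  # same (w, h, 3) uint8 result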
Example #25
    def get_geodataset(self, nodeIndex):
        """
        Constructs a GeoDataset object from the given node image and assigns the 
        dataset and its NumPy array to the 'handle' and 'image' node attributes.

        Parameters
        ----------
        nodeIndex : int
                    The index of the node.

        """
        self.node[nodeIndex]['handle'] = GeoDataset(self.node[nodeIndex]['image_path'])
        self.node[nodeIndex]['image'] = bytescale(self.node[nodeIndex]['handle'].read_array())
Example #26
    def __init__(self, paths, fn, fn_mapping, has_alpha, scale_factor=1.0):
        super().__init__(paths,
                         fn,
                         fn_mapping,
                         has_alpha,
                         scale_factor=scale_factor)
        fpath = os.path.join(self.paths['images'],
                             self.fn_mapping['images'](self.fn))
        _, fname = os.path.split(fpath)
        if "*" in fname:
            files = glob.glob(fpath)
            assert len(
                files
            ) == 1, 'Multiple files match the image file name pattern, please use a unique pattern.'
            fpath = files[0]
            lfpath = fpath.lower()
            if not lfpath.endswith('.jpg') and not lfpath.endswith(
                    '.jpeg') and not lfpath.endswith(
                        '.png') and not lfpath.endswith(
                            '.tif') and not lfpath.endswith(
                                '.tiff') and not lfpath.endswith(
                                    '.bmp') and not lfpath.endswith('.gif'):
                raise Exception('Unsupported file format: ' + fname)

        if fpath.lower().endswith('.tif') or fpath.lower().endswith('.tiff'):
            img = imread(fpath)
        else:
            img = imread(fpath, pilmode="RGB")

        if len(img.shape) == 2:
            rgb_img = np.stack((img, ) * 3, axis=-1)
        else:
            assert len(img.shape) == 3
            # has alpha channel?
            if img.shape[2] == 4:
                rgb_img = img[:, :, :-1]
            else:
                rgb_img = img

        self.im = bytescale(rgb_img)

        if '646f5e00a2db3add97fb80a83ef3c07edd1b17b1b0d47c2bd650cdcab9f322c0' in fn:
            self.im = cv2.imread(os.path.join(self.paths['images'], self.fn),
                                 cv2.IMREAD_COLOR)

        if scale_factor != 1.0:
            width, height, _ = self.im.shape
            self.im = imresize(
                self.im,
                (int(scale_factor * width), int(scale_factor * height)),
                interp='bicubic')
Example #27
def linear_stretch(data, min_percentile=1.0, max_percentile=97.0):
    if verbose:
        print('linear_stretch', numpy.min(data), numpy.mean(data), numpy.max(data), min_percentile, max_percentile)

    pmin, pmax = numpy.percentile(data[numpy.nonzero(data)], (min_percentile, max_percentile))
    if verbose:
        print("pmin:", pmin)
        print("pmax:", pmax)

    data[data > pmax] = pmax
    data[data < pmin] = pmin

    bdata = misc.bytescale(data)
    return bdata
Example #28
def check():
    # invert the image
    img = misc.imread('assets/out.png')
    img = (255 - cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)) / 255

    # resize the image into a 28x28 matrix
    img = misc.imresize(img, (28, 28))
    img = img.astype('float32')

    # rescale pixel values to the 0-16 range to match the input data
    img = misc.bytescale(img, high=16, low=0)

    flat_img = img.reshape(1, 784)
    result = model.predict_classes(flat_img)
    return result
Example #29
def pc_image(pca, shape, pc_idx):

    U = REACT2D.build_dct(shape[0], shape[1], 50)

    pca_images = np.empty((shape[0], shape[1], 3))

    pca_images[:, :, 0] = pca.components_[pc_idx, :ncoefs].dot(U.T[:ncoefs, :]).reshape((shape[0], shape[1]))
    pca_images[:, :, 1] = pca.components_[pc_idx, ncoefs:2*ncoefs].dot(U.T[:ncoefs, :]).reshape((shape[0], shape[1]))
    pca_images[:, :, 2] = pca.components_[pc_idx, 2*ncoefs:].dot(U.T[:ncoefs, :]).reshape((shape[0], shape[1]))

    plt.imshow(bytescale(pca_images))
    plt.title('PC ' + str(pc_idx + 1))
    plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
    plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
    plt.show()
Example #30
    def loadData(self, list_file_names):
        x_train = []
        y_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 0, 0, 0, 2, 2, 2]
        for name in list_file_names:
            img = misc.imread(name)
            img = misc.imresize(img, (8, 8))
            img = img.astype(np.float64)
            img = misc.bytescale(img, high=16, low=0)
            x_test = []
            for row in img:
                for pixel in row:
                    x_test.append(sum(pixel) / 3.0)

            x_train.append(x_test)
        return x_train, y_labels
Example #31
def linear_stretch(data, min_percentile=1.0, max_percentile=97.0):
    if verbose:
        print('linear_stretch', numpy.min(data), numpy.mean(data), numpy.max(data), min_percentile, max_percentile)

    pmin, pmax = numpy.percentile(data[numpy.nonzero(data)],
                                  (min_percentile, max_percentile))
    if verbose:
        print("pmin:", pmin)
        print("pmax:", pmax)

    data[data > pmax] = pmax
    data[data < pmin] = pmin

    bdata = misc.bytescale(data)
    return bdata
Example #32
def img2array(image, type):
    # Reading image
    img = misc.imread(image)
    # Resizing image to 8 * 8 = 64
    img = misc.imresize(img, (8, 8))
    # Updating type from unsigned int 8 to signed float 64
    img = img.astype(type)
    # We have data set image of integer pixels in the range 0..16.
    img = misc.bytescale(img, high=16, low=0)

    array = []
    # Converting out 3D array to 1D Array
    for each_row in img:
        for each_pixel in each_row:
            array.append(((sum(each_pixel)) / 3.0))

    return array
Example #33
def find_Combination(drug1, drug2, batch):

    data = pandas.read_sql(
        "select  Image_FileName_DAPI,Image_FileName_Mitotracker,Image_FileName_BetaTubulin,Image_PathName_DAPI  from DPN1018"
        + batch + "Per_Image where (Image_Metadata_ID_A  = '" + drug1 +
        "' and Image_Metadata_ID_B = '" + drug2 +
        "') or (Image_Metadata_ID_A  = '" + drug2 +
        "' and Image_Metadata_ID_B = '" + drug1 + "');",
        con=db)

    if len(data) == 0:
        print('No Images found (Check spelling)')
        exit()

    images = []
    foci = []
    for image, image2, image3, path in zip(data['Image_FileName_DAPI'],
                                           data['Image_FileName_BetaTubulin'],
                                           data['Image_FileName_Mitotracker'],
                                           data['Image_PathName_DAPI']):
        path = path.split('lab_menche')[1]

        foci.append(int(image.split('f0')[1].split('p')[0]))

        k = misc.imread("/Volumes/scratch/lab_menche" + path + "/" + image)
        k2 = misc.imread("/Volumes/scratch/lab_menche" + path + "/" + image2)
        k3 = misc.imread("/Volumes/scratch/lab_menche" + path + "/" + image3)

        #print k
        #exit()

        rgb = np.dstack((k3, k2, k))

        #r_rgb = misc.imresize(rgb, 100)
        #plt.imshow(rgb)
        #plt.show()

        test = misc.bytescale(rgb)

        #plt.imshow(test)
        #plt.show()
        #plt.close()

        images.append(test)

    return create_combined_Image(images, foci)
Example #34
def calculate_color_features(data):
    """ Calculates color features related to the greenness of each point.

    The default features are [a, b, NGRDVI] where a and b are the green-red and
    blue-yellow coordinates of the CIE-Lab color space.

    Args:
        data: An n x d array of input data. Rows are [x, y, z, r, g, b, ...]

    Returns:
        An n x 3 array of features for each point.
    """

    rgb = bytescale(data[:, 3:6]).astype(np.int16)
    lab = rgb2lab(np.array([rgb]))[0].reshape(-1, 3)
    ngrdvi = calculate_ngrdvi(data).reshape(-1, 1)
    return np.hstack([lab[:, 1:3], ngrdvi])
Example #35
def nhance(frames, pct=1.0, frs=60, swp=30):
    with tf.Session(config=config) as sess:
        x_, enhanced = build_net()

        saver = tf.train.Saver()
        saver.restore(sess, "models/iphone")

        imgs = np.float16(frames) / 255
        vlen = frames.shape[0]

        ret = np.empty(frames.shape, dtype=np.uint8)
        if pct < 1.0:
            ret_ba = np.copy(frames)
            lim_pt = int(img_w * (1.0 - pct))
        else:
            ret_ba = None

        ret_sw = np.copy(frames)

        ptr = 0
        while ptr < vlen:
            print('Ptr is: ', ptr)
            bsz = batch_sz if ptr + batch_sz <= vlen else (vlen - ptr)
            batch = np.reshape(imgs[ptr:ptr + bsz], [bsz, img_sz])
            r_batch = sess.run(enhanced, feed_dict={x_: batch})
            r_batch = bytescale(np.reshape(r_batch,
                                           [bsz, img_h, img_w, img_d]))
            ret[ptr:ptr + batch_sz, :, :, :] = r_batch
            if pct < 1.0:
                ret_ba[ptr:ptr + batch_sz, :, lim_pt:, :] = r_batch[:, :,
                                                                    lim_pt:, :]
            if ptr > frs + swp:
                ret_sw[ptr:ptr + batch_sz, :, :, :] = r_batch
            else:
                for j in range(bsz):
                    if ptr + j <= frs:
                        continue
                    elif ptr + j <= frs + swp:
                        prop = ((ptr + j - frs) * 1.0) / (swp * 1.0)
                        ppoz = int(img_w * prop)
                        ret_sw[ptr + j, :, :ppoz, :] = r_batch[j, :, :ppoz, :]
                    else:
                        ret_sw[ptr + j, :, :, :] = r_batch[j, :, :, :]
            ptr += bsz

        return ret, ret_ba, ret_sw
Example #36
    def data_to_labels_rgb(data, NUM_LABELS, RANGE_MIN, RANGE_MAX):
        """
        Rescales the histology for deep learning processing.
        Since labels start at zero, the high parameter is NUM_LABELS-1

        the cmin and cmax parameters depend on the properties of the histology
        and can be changed to obtain a different linear mapping of the final labels

        Example:

        test = np.linspace(0,1,num=100)

        array([ 0.        ,  0.01010101,  0.02020202,  0.03030303,  0.04040404,
                0.05050505,  0.06060606,  0.07070707,  0.08080808,  0.09090909,
                ...

                0.95959596,  0.96969697,  0.97979798,  0.98989899,  1.        ])

        bytescale(test,low=0, high=15, cmin=0,cmax=0.5)
        array([ 0,  0,  1,  1,  1,  2,  2,  2,  2,  3,  3,  3,  4,  4,  4,  5,  5,
        5,  5,  6,  6,  6,  7,  7,  7,  8,  8,  8,  8,  9,  9,  9, 10, 10,
       10, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14, 15, 15, 15,
       15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
       15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
       15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15], dtype=uint8)

        bytescale(test,low=0, high=15-1, cmin=0,cmax=0.5)
        array([ 0,  0,  1,  1,  1,  1,  2,  2,  2,  3,  3,  3,  3,  4,  4,  4,  5,
        5,  5,  5,  6,  6,  6,  7,  7,  7,  7,  8,  8,  8,  8,  9,  9,  9,
       10, 10, 10, 10, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 14, 14, 14,
       14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
       14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
       14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14], dtype=uint8)

        """
        im = bytescale(data,
                       low=0,
                       high=NUM_LABELS - 1,
                       cmin=RANGE_MIN,
                       cmax=RANGE_MAX)
        w, h = data.shape
        ret = np.empty((w, h, 3), dtype=np.uint8)
        ret[:, :, 0] = im
        ret[:, :, 1] = im
        ret[:, :, 2] = im
        return ret
Example #37
File: lipitk.py Project: vihari/CSD
def load_data(fldr):
    IMAGE_SIZE = 32
    images, labels, uids = [], [], []

    width, height = IMAGE_SIZE, IMAGE_SIZE
    MAX_NUM_DOMAINS = 110
    uid = 0
    cache_fname = 'data/lipitk.pkl'
    if os.path.exists(cache_fname):
        images, labels, uids = pickle.load(open(cache_fname, "rb"))
    else:
        for dom in tqdm.tqdm(range(MAX_NUM_DOMAINS)):
            dom_fldr = "%s/usr_%d" % (fldr, dom)
            if not os.path.exists(dom_fldr):
                continue
            for fname in os.listdir(dom_fldr):
                if fname.find('.tiff') < 0:
                    continue
                li = int(fname.split('t')[0])

                img = misc.imread(dom_fldr + "/" + fname)
                img = misc.imresize(img, (height, width))
                img = img.astype(np.float32)
                img = misc.bytescale(img)
                img = img.astype(np.uint8)

                assert np.max(img) <= 255 and np.min(
                    img) >= 0, "Max and min of image: %f %f" % (np.max(img),
                                                                np.min(img))
                img = (img - 128.) / 128.
                assert np.max(img) != np.min(img)
                images.append(img)
                labels.append(li)
                uids.append(uid)
            uid += 1
        pickle.dump((images, labels, uids), open(cache_fname, "wb"))

    print("Labels: %s uids: %s" % (labels[:10], uids[:10]))
    print("Labels: %s uids: %s" % (labels[-10:], uids[-10:]))
    print("Test images: ", np.max(images[0]), np.min(images[0]))

    print("Read %d examples" % len(images))
    uids = np.array(uids)
    np.random.shuffle(uids)
    return np.array(images), np.array(labels), uids
Example #38
def _add_to_tfrecord(image_dir, tfrecord_writer, split_name):
    """Loads images and writes files to a TFRecord.

    Args:
      image_dir: The image directory where the raw images are stored.
      tfrecord_writer: The TFRecord writer to use for writing.
      split_name: The name of the dataset split being converted.
    """
    # filenames = tf.train.match_filenames_once(os.path.join(image_dir,'\*.jpg'))
    list = os.listdir(image_dir)
    i = 0
    count = 0
    for filename in list:
        i += 1

        filenames = os.listdir(os.path.join(image_dir + "/", filename))

        nums = len(filenames)
        count += nums
        shape = (_IMAGE_HEIGHT, _IMAGE_WIDTH, _IMAGE_CHANNELS)
        with tf.Graph().as_default():
            image = tf.placeholder(dtype=tf.uint8, shape=shape)
            encoded_png = tf.image.encode_jpeg(image)
            j = 0

            with tf.Session('') as sess:
                for line in filenames:
                    sys.stdout.write(
                        '\r>> Converting %s%s image %d/%d  now%d images' %
                        (filename, split_name, j + 1, nums, count))
                    sys.stdout.flush()
                    j += 1
                    image_data = misc.imread(
                        os.path.join(image_dir + "/" + filename + "/", line))
                    label = i - 1
                    image_data = misc.bytescale(image_data)
                    image_data = misc.imresize(image_data,
                                               [_IMAGE_HEIGHT, _IMAGE_WIDTH])
                    png_string = sess.run(encoded_png,
                                          feed_dict={image: image_data})
                    example = image_to_tfexample(png_string, label,
                                                 bytes(line,
                                                       'utf-8'), _IMAGE_HEIGHT,
                                                 _IMAGE_WIDTH, b'jpg')
                    tfrecord_writer.write(example.SerializeToString())
Example #39
def process(source, IMAGE_SIZE=224):
    ds = dicom.read_file(source)
    pixel_array = ds.pixel_array
    height, width = pixel_array.shape
    if height < width:
        pixel_array = pixel_array[:,
                                  int((width - height) /
                                      2):int((width + height) / 2)]
    else:
        pixel_array = pixel_array[int((height - width) /
                                      2):int((width + height) / 2), :]
    im = cv2.resize(pixel_array, (IMAGE_SIZE, IMAGE_SIZE))
    im = bytescale(im)
    # im = im / 256
    im = np.dstack((im, im, im))
    im = im[:, :, [2, 1, 0]]
    im = im.transpose((2, 0, 1))
    return im
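Because the three stacked channels are identical copies, the dstack / BGR-reorder / transpose sequence reduces to a single broadcast into CHW layout; an equivalent sketch:

im = np.repeat(im[np.newaxis, :, :], 3, axis=0)  # (3, H, W); the [2, 1, 0] reorder is a no-op for identical channels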
Example #40
def check():
    img = misc.imread('assets/out.png')
    # invert the image
    img = 255 - cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # resize the image into an 8x8 matrix
    img = misc.imresize(img, (8, 8))
    img = img.astype('float64')

    # rescale pixel values to the 0-16 range to match the input data
    img = misc.bytescale(img, high=16, low=0)

    # uncomment if you want to see the image
    # plt.imshow(img, cmap=plt.cm.gray_r, interpolation='nearest')

    flat_img = img.reshape(1, 64)
    result = clf.predict(flat_img)
    return result
Example #41
def imshow(image, colormap=False, video=False):
    import imageio
    import iterm2_tools
    from matplotlib import cm
    from scipy.misc import bytescale
    from PIL import Image
    from iterm2_tools.images import display_image_bytes

    if type(image).__name__ == 'Variable':
        image = image.data
    if 'torch.cuda' in type(image).__module__:
        image = image.cpu()
    if 'Tensor' in type(image).__name__:
        image = image.numpy()

    if colormap:
        image = (cm.Blues(image) * 255).astype(np.uint8)
    else:
        image = bytescale(image)

    if image.ndim == 4:
        video = True
    if image.ndim==3 and (image.shape[0] not in [1,3] and image.shape[-1] not in [1,3]):
        video = True

    if video:
        if image.shape[1] == 3:
            image = image.transpose([0,2,3,1]).astype(np.uint8)
        image = image.squeeze()
        if image.ndim == 2:
            image = image[None]
        images = [im for im in image]
        s = imageio.mimsave(imageio.RETURN_BYTES, images, format='gif', duration=0.3)
        print(display_image_bytes(s))
    else:
        if image.shape[0] == 3:
            image = image.transpose([1,2,0]).astype(np.uint8)
        image = image.squeeze()
        s = imageio.imsave(imageio.RETURN_BYTES, image, format='png')
        s = display_image_bytes(s)
        # Depending on the version of iterm2_tools, display_image_bytes can
        # either print directly to stdout or return the string to print.
        if s is not None:
            print(s)
Example #42
def _create_blanks(blank_sec_length, on_msec_length, off_msec_length,
                   stimulus_shape, blank_loc):
    """create the blank stimuli

    stimulus_shape: 2-tuple of ints. specifies the shape of a single stimulus.
    """
    nblanks = blank_sec_length / ((on_msec_length + off_msec_length) / 1000.)
    if nblanks != int(nblanks):
        raise Exception(
            "Because of your timing ({loc}_blank_sec_length, on_msec_length, and "
            "off_msec_length), I can't show blanks for the {loc} {length:.02f} seconds"
            ". {loc}_blank_sec_length must be a multiple of on_msec_length+"
            "off_msec_length!".format(loc=blank_loc, length=blank_sec_length))
    nblanks = int(nblanks)
    blanks = smisc.bytescale(np.zeros(
        (nblanks, stimulus_shape[0], stimulus_shape[1])),
                             cmin=-1,
                             cmax=1)
    return nblanks, blanks
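With cmin=-1 and cmax=1, the all-zero blank frames land at the midpoint of the byte range, so the blanks are mid-gray rather than black. A quick check, assuming SciPy's round-half-up behavior:

smisc.bytescale(np.zeros((1, 2, 2)), cmin=-1, cmax=1)  # -> uniform 128s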
Example #43
def main():
    fig = plt.figure()
    axis = [fig.add_subplot(211 + _) for _ in range(2)]
    s = open(XPM_INFILE, 'rb').read()
    nda = xpm.XPM(s)  # as ndarray (dtype=np.uint8) BGR(A)
    sys.stderr.write('%s\n' % str(nda))
    r, c, m = nda.shape
    img = Image.frombuffer('RGBA', (c, r), nda, 'raw', 'BGRA', 0, 1)
    img.show()  # PIL.Image
    bm = np.array(img)  # RGB(A)
    axis[0].imshow(bm)
    # misc.imsave(XPM_OUTGIF_0, np.uint8(bm)) # changed
    misc.imsave(XPM_OUTGIF_0, np.float32(bm))  # color changed
    # misc.imsave(XPM_OUTGIF_1, misc.bytescale(bm, cmin=0, cmax=255)) # changed
    misc.imsave(XPM_OUTGIF_1, misc.bytescale(bm))  # color changed
    # im = misc.toimage(bm, cmin=0, cmax=255) # same as PIL.Image
    im = misc.toimage(bm)  # same as PIL.Image
    im.save(XPM_OUTPNG)  # palette ok
    axis[1].imshow(im)
    plt.show()
Example #44
def visualize_intermidiate_output(model, X, layer_number, folder_path,
                                  visualize_train_or_test):

    get_layer_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[layer_number].output])
    if visualize_train_or_test == 'train':
        layer_output = get_layer_output([X, 1])[0]
    if visualize_train_or_test == 'test':
        layer_output = get_layer_output([X, 0])[0]
    print(layer_number, layer_output.shape)

    if not os.path.exists(folder_path + str(layer_number) + '/'):
        os.makedirs(folder_path + str(layer_number) + '/')
    count = 1
    for data in layer_output[0]:
        img_i = bytescale(data)
        pic_name = folder_path + str(layer_number) + '/' + str(count) + '.jpg'
        cv2.imwrite(pic_name, img_i)
        count += 1
Example #45
def proc_imgs_comp(i, warp_matrices, bndFolders, panel_irradiance):
    
    
    
    i.compute_reflectance(panel_irradiance) 
    #i.plot_undistorted_reflectance(panel_irradiance)  


    cropped_dimensions, edges = imageutils.find_crop_bounds(i, warp_matrices)
    
    im_aligned = imageutils.aligned_capture(i, warp_matrices,
                                            cv2.MOTION_HOMOGRAPHY,
                                            cropped_dimensions,
                                            None, img_type="reflectance")
    
    im_display = np.zeros((im_aligned.shape[0],im_aligned.shape[1],5), dtype=np.float32 )
    
    for iM in range(0,im_aligned.shape[2]):
        im_display[:,:,iM] =  imageutils.normalize(im_aligned[:,:,iM])
    
    rgb = im_display[:,:,[2,1,0]] 
    #cir = im_display[:,:,[3,2,1]] 
    RRENir = im_display[:,:,[4,3,2]] 
    
    imoot = [rgb, RRENir]
    imtags = ["RGB.tif", "RRENir.tif"]
    im = i.images[1]
    hd, nm = os.path.split(im.path[:-5])
    
    for ind, k in enumerate(bndFolders):
         
         img8 = bytescale(imoot[ind])
         
         outfile = os.path.join(k, nm+imtags[ind])
         
         imageio.imwrite(outfile, img8)
        
         cmd = ["exiftool", "-tagsFromFile", im.path,  "-file:all", "-iptc:all",
               "-exif:all",  "-xmp", "-Composite:all", outfile, 
               "-overwrite_original"]
         call(cmd)
Example #46
def auto_contrast(image, low=0.02, high=0.99):

    max_val = np.max(image)
    min_val = np.min(image)
    imb = misc.bytescale(image)
    xh = np.array(range(257))
    histo = np.histogram(imb, bins=xh)
    xh = histo[1][0:-1]
    yh = histo[0]
    #    plot(xh,yh)
    #    show()
    yhtot = np.sum(yh)

    # Get minimum level
    yh_i = 0.0
    i = 0
    if (low <= 0.0):
        lev_min = min_val
    else:
        while (yh_i < low * yhtot):
            yh_i += yh[i]
            i += 1
        lev_min = (max_val - min_val) * (float(xh[i - 1]) / 255.0) + min_val

    # Get maximum level
    yh_i = 0.0
    i = 0
    if (high >= 1.0):
        lev_max = max_val
    else:
        while (yh_i < high * yhtot):
            yh_i += yh[i]
            i += 1
        lev_max = (max_val - min_val) * (float(xh[i - 1]) / 255.0) + min_val

    il = np.where(image <= lev_min)
    image[il] = lev_min
    ih = np.where(image >= lev_max)
    image[ih] = lev_max

    return image
Example #47
def pibayerraw(fn, exposure_sec, bit8):
    with PiCamera() as cam:  #load camera driver
        print('camera startup gain autocal')
        #LED automatically turns on, this turns it off
        cam.led = False
        sleep(
            0.75
        )  # somewhere between 0.5..0.75 seconds to let camera settle to final gain value.
        setparams(
            cam, exposure_sec
        )  #wait till after sleep() so that gains settle before turning off auto
        getparams(cam)
        counter = 1
        #%% main loop
        while True:
            #            tic = time()
            img10 = grabframe(cam)
            #            print('{:.1f} sec. to grab frame'.format(time()-tic))
            #%% linear scale 10-bit to 8-bit
            if bit8:
                img = bytescale(img10, 0, 1024, 255, 0)
            else:
                img = img10
#%% write to PNG or JPG or whatever based on file extension
            max_value = img.max()
            print(max_value)
            if max_value > 50:
                idx = unravel_index(img.argmax(), img.shape)
                xidx = idx[0]
                yidx = idx[1]
                print(xidx, yidx)
                xlow = max(0, xidx - 25)
                ylow = max(0, yidx - 25)
                xhi = min(1944, xidx + 25)
                yhi = min(2592, yidx + 25)
                imsave(fn + '%03d' % counter + '.png', img[xlow:xhi, ylow:yhi])
                counter = counter + 1


#                break
    return img
Example #48
File: network.py Project: jmoul/autocnet
    def get_array(self, nodeindex, downsampling=1):
        """
        Downsample the input image file by some amount using bicubic interpolation
        in order to reduce data sizes for visualization and analysis, e.g. feature detection

        Parameters
        ----------
        nodeindex : hashable
                    The index into the node containing a geodataset object

        downsampling : int
                       [1, infinity] downsampling
        """

        array = self.node[nodeindex]['handle'].read_array()
        newx_size = int(array.shape[0] / downsampling)
        newy_size = int(array.shape[1] / downsampling)

        resized_array = imresize(array, (newx_size, newy_size), interp='bicubic')
        self.node[nodeindex]['image'] = bytescale(resized_array)
        self.node[nodeindex]['image_downsampling'] = downsampling
Example #49
def hough_circle_image(img, dp, mindist, param1, param2, minr, maxr):
    img = bytescale(img)

    circles = cv2.HoughCircles(img,
                               cv2.HOUGH_GRADIENT,
                               dp,
                               mindist,
                               param1=param1,
                               param2=param2,
                               minRadius=minr,
                               maxRadius=maxr)
    if circles is not None:
        circles = np.uint16(np.around(circles))

        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(img, (i[0], i[1]), i[2], (0, 255, 0), 2)
            # draw the center of the circle
            cv2.circle(img, (i[0], i[1]), 2, (0, 0, 255), 3)

    return (img, circles)
Example #50
def main(image):
    #global path
    #a = 0
    #stop = False
    #while not stop:
    #if a < len(commands):
    #for n, i in enumerate(commands):
    #i = path + i

    digits = datasets.load_digits()

    n_samples = len(digits.images)

    #features = digits.data
    features = digits.images.reshape((n_samples, -1))
    labels = digits.target
    clf = SVC(gamma=0.0001, C=100)
    clf.fit(features[:n_samples], labels[:n_samples])

    img = imread(image)
    #print(f"{img}img = imread(i)")
    img = transform.resize(img, (8, 8))
    #print(f"{img}img = transform.resize(img, (8,8))")
    img = img.astype(digits.images.dtype)
    #img = transform.resize(img, (8,8))
    #print(f"{img}img = img.astype(digits.images.dtype)")
    img = bytescale(img, high=16, low=0)
    imshow(img)
    #print(f"{img}img = bytescale(img, high=16, low=0)")

    x_test = []
    for eachRow in img:
        for eachPixel in eachRow:
            x_test.append(sum(eachPixel) / 3)
    clf.predict([x_test])
    #print('\n' * 2)
    #print(f"{clf.predict([x_test])} - recognized digit :)")
    #print('\n' * 2)
    #a += 1
    save_data(clf.predict([x_test]))  #
Example #51
def auto_contrast(image,low=0.02,high=0.99):

    max_val = np.max(image)
    min_val = np.min(image)
    imb = misc.bytescale(image)
    xh = np.array(range(257))
    histo = np.histogram(imb,bins=xh)
    xh = histo[1][0:-1]
    yh = histo[0]
    #    plot(xh,yh)
    #    show()
    yhtot = np.sum(yh)

    # Get minimum level
    yh_i = 0.0 ; i=0
    if (low <= 0.0):
        lev_min = min_val
    else:
        while (yh_i < low*yhtot):
            yh_i += yh[i]
            i += 1
        lev_min = (max_val - min_val)*(float(xh[i-1])/255.0) + min_val

    # Get maximum level
    yh_i = 0.0 ; i=0
    if (high >= 1.0):
        lev_max = max_val
    else:
        while (yh_i < high*yhtot):
            yh_i += yh[i]
            i += 1
        lev_max = (max_val - min_val)*(float(xh[i-1])/255.0) + min_val   
    
    il = np.where(image <= lev_min)
    image[il] = lev_min
    ih = np.where(image >= lev_max)
    image[ih] = lev_max

    return image
Example #52
def nodules_detection(path):
    if len(os.listdir('/output/Detector_output/')) == 0:
        detector.detect_nodules('/output/DataPreprocessing_output/',
                                '/output/Detector_output/')
    npy_files = os.listdir('/output/Detector_output/')
    if not os.path.exists('/output/Detector_images/'):
        os.makedirs('/output/Detector_images/')

    if len(npy_files) > 0:
        filenames = []
        for file in npy_files:
            arr = np.load(os.path.join('/output/Detector_output/', file))
            for i, scan in enumerate(arr):
                scan = bytescale(scan)
                img = Image.fromarray(scan)
                filename = file.replace('.npy', str(i)) + '.jpg'
                filenames.append(filename)
                img.save('/output/Detector_images/' + filename)
            #json_arr = jsonify(filenames = filenames)
            return render_template('nodules.html', filenames=filenames)
    else:
        return "No scans to detect Nodules.."
Example #53
File: digits.py Project: OlichkaKr/IR
def recognize():
    digits = datasets.load_digits()
    features = digits.data
    labels = digits.target

    clf = SVC(gamma=0.001)
    clf.fit(features, labels)

    for i in range(1, 8):
        img = misc.imread('c_' + str(i) + ".png")
        # img = misc.imresize(img, (8,8))
        img = img.astype(digits.images.dtype)
        print(img.dtype)
        img = misc.bytescale(img, high=16, low=0)

        x_test = []

        for eachRow in img:
            # for eachPixel in eachRow:
            x_test.append(sum(eachRow) / 3.0)

        print(clf.predict([x_test]))
Example #54
def image_save(fname, img_array, low=-1, high=1):
    """
    Saves the image ``img_array`` to ``fname``.

    Args:
        fname: Save image under this filename
        img_array: array of shape ``(height, width)``, ``(1, height, width)``,
            or ``(3, height, width)``.
        low: Lowest pixel value
        high: Highest pixel value.

    The image range must be between ``low`` and ``high``.
    """
    if img_array.dtype != np.uint8:
        if img_array.min() < low:
            raise Exception("Got an image with min {}, but low is set to {}."
                            .format(img_array.min(), low))

        if img_array.max() > high:
            raise Exception("Got an image with max {}, but high is set to {}."
                            .format(img_array.max(), high))

    ndim = len(img_array.shape)
    nb_channels = len(img_array)
    if ndim == 3 and nb_channels == 1:
        img = img_array[0]
    elif ndim == 3 and nb_channels == 3:
        img = np.moveaxis(img_array, 0, -1)
    elif ndim == 2:
        img = img_array
    else:
        raise Exception("Did not understand image shape: {}".format(img_array.shape))

    if img_array.dtype == np.uint8:
        img_bytes = img_array
    else:
        img_0_to_1 = (img - low) / (high - low)
        img_bytes = bytescale(img_0_to_1 * 255, cmin=0, cmax=255)
    toimage(img_bytes).save(fname)
Example #55
def make_cca_images(cca, shape, dct_idx=None):

    n_components = cca.x_weights_.shape[1]
    U = REACT2D.build_dct(shape[0], shape[1], 50)

    dct_idx = np.arange(2499)
    U = U[:, dct_idx]

    cca_images = np.empty((n_components, shape[0], shape[1], 3))

    cca_images[:, :, :, 0] = \
        cca.components_[:, dct_idx].dot(U.T).reshape((n_components, shape[0], shape[1]))
    cca_images[:, :, :, 1] = \
        cca.components_[:, dct_idx + len(dct_idx)].dot(U.T).reshape((n_components, shape[0], shape[1]))
    cca_images[:, :, :, 2] = \
        cca.components_[:, dct_idx + 2*len(dct_idx)].dot(U.T).reshape((n_components, shape[0], shape[1]))

    ncca_rows = 3
    ncca_cols = 3
    nplots = 2

    cca_idx = 0
    for plot in range(nplots):
        idx = 1
        plt.clf()
        for row in range(ncca_rows):
            for col in range(ncca_cols):
                print(row, col, idx)
                plt.subplot(ncca_rows, ncca_cols, idx)
                plt.imshow(bytescale(cca_images[cca_idx, :, :, :]))
                plt.title('CCA ' + str(cca_idx + 1))
                plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
                plt.tick_params(axis='y', which='both', bottom='off', top='off', labelbottom='off')
                idx += 1
                cca_idx += 1
        plt.savefig(plot_dir + 'CCA_Images_' + str(plot + 1) + '.png')
        if doshow:
            plt.show()
Example #56
def extract_features(image_array, extractor_parameters):
    """
    This method finds and extracts features from an image using the given dictionary of keyword arguments. 
    The input image is represented as NumPy array and the output features are represented as keypoint IDs 
    with corresponding descriptors.

    Parameters
    ----------
    image_array : ndarray
                  a NumPy array that represents an image
    extractor_parameters : dict
                           A dictionary containing OpenCV SIFT parameters names and values. 

    Returns
    -------
    : tuple
      in the form ([list of OpenCV KeyPoints], [NumPy array of descriptors as geometric vectors])
    """

    sift = cv2.xfeatures2d.SIFT_create(**extractor_parameters)
    converted_array = misc.bytescale(image_array)

    return sift.detectAndCompute(converted_array, None)
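A usage sketch (the parameter values are illustrative; nfeatures and contrastThreshold are standard cv2.xfeatures2d.SIFT_create keywords):

keypoints, descriptors = extract_features(image_array,
                                          {'nfeatures': 500, 'contrastThreshold': 0.04})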
Example #57
File: utils.py Project: vihari/CSD
def _load_lipitk(fldr):
    images, labels, uids = [], [], []

    IMAGE_SIZE = 32
    width, height = IMAGE_SIZE, IMAGE_SIZE
    MAX_NUM_DOMAINS = 110
    uid = 0
    for dom in range(MAX_NUM_DOMAINS):
        dom_fldr = "%s/usr_%d" % (fldr, dom)
        if not os.path.exists(dom_fldr):
            continue
        for fname in os.listdir(dom_fldr):
            if fname.find('.tiff') < 0:
                continue
            li = int(fname.split('t')[0])

            img = misc.imread(dom_fldr + "/" + fname)
            img = misc.imresize(img, (height, width))
            img = img.astype(np.float32)
            img = misc.bytescale(img)
            img = img.astype(np.uint8)

            assert np.max(img) <= 255 and np.min(
                img) >= 0, "Max and min of image: %f %f" % (np.max(img),
                                                            np.min(img))
            img = img / 255.
            images.append(img)
            labels.append(li)
            uids.append(uid)
        uid += 1

    print("Labels: %s uids: %s" % (labels[:10], uids[:10]))
    print("Labels: %s uids: %s" % (labels[-10:], uids[-10:]))
    print("Test images: ", np.max(images[0]), np.min(images[0]))

    print("Read %d examples" % len(images))
    return np.array(images), np.array(labels), np.array(uids)
Example #58
def ScaleImage( Image ):
    '''
    Create a byte scaled image with specified histogram
    '''
    #Range = numpy.array([0.0, 0.8])
    Range = numpy.array([0.0, 1.0])
    ScaledImage = bytescale( Image, cmin=Range[0], cmax=Range[1] )

    # MODIS Rapid Response Enhancement for True Colour
    # x array of input values
    x = numpy.array([0,  30,  60, 120, 190, 255])

    # y array of output values
    y = numpy.array([0, 110, 160, 210, 240, 255])

    # Create output array
    rows = Image.shape[0]
    cols = Image.shape[1]

    Scaled = numpy.zeros( (rows,cols), numpy.uint8 )

    for i in range(x.shape[0] - 1):
        x1 = x[i]
        x2 = x[i + 1]
        y1 = y[i]
        y2 = y[i + 1]

        m =  (y2 - y1) / float((x2 - x1))
        b = y2 - (m * x2)

        mask = numpy.where( (ScaledImage >= x1) & (ScaledImage < x2) )
        Scaled[mask] = (m * ScaledImage + b)[mask]

    mask = numpy.where( ScaledImage >= x2 )
    Scaled[mask] = 255

    return Scaled
Example #59
    def showImage(self):
        #https://github.com/shuge/Enjoy-Qt-Python-Binding/blob/master/image/display_img/pil_to_qpixmap.py
        #myQtImage = ImageQt(im)
        #qimage = QtGui.QImage(myQtImage)
        im = self.image
        if (self.divide_background.isChecked()
                and self.background_image is not None):
            im = np.true_divide(im, self.background_image)
            im = bytescale(im)
        elif self.bit_depth > 8:
            # if we ask the camera for more than 8 bits, we will get a 16 bit
            # image that uses the upper bits, so discard the lower 8 bits to get
            # something we can show on the screen
            im = im / 2**8
        im = to_pil_image(im)
        data = im.convert("RGBA").tobytes('raw', "RGBA")

        qim = QtGui.QImage(data, self.roi_shape[0], self.roi_shape[1],
                           QtGui.QImage.Format_ARGB32)
        pixmap = QtGui.QPixmap.fromImage(qim)

        myScaledPixmap = pixmap.scaled(QtCore.QSize(900, 900))

        self.frame.setPixmap(myScaledPixmap)
Example #60
def GuessAnchorRegion(whole_img, sample_region):
    """
    It detects a region with clean edges, suitable for drift measurements. This region
    must not overlap with the sample that is to be scanned due to the danger of
    contamination.
    whole_img (ndarray): 2d array with the whole SEM image
    sample_region (tuple of 4 floats): roi of the sample in order to avoid overlap
    returns (tuple of 4 floats): roi of the anchor region
    """
    # Drift correction region shape
    dc_shape = (50, 50)

    # Properly modified image for cv2.Canny
    uint8_img = misc.bytescale(whole_img)

    # Generates black/white image that contains only the edges
    cannied_img = cv2.Canny(uint8_img, 100, 200)

    # Mask the sample_region plus a margin equal to the half of dc region and
    # a margin along the edges of the whole image again equal to the half of
    # the anchor region. Thus we keep pixels that we can use as center of our
    # anchor region knowing that it will not overlap with the sample region
    # and it will not be outside of bounds
    masked_img = cannied_img

    # Clip between the bounds (cast to int so the results can index the array)
    left = int(sorted(
        (0, sample_region[0] * whole_img.shape[0] - (dc_shape[0] // 2),
         whole_img.shape[0]))[1])
    right = int(sorted(
        (0, sample_region[2] * whole_img.shape[0] + (dc_shape[0] // 2),
         whole_img.shape[0]))[1])
    top = int(sorted((0, sample_region[1] * whole_img.shape[1] - (dc_shape[1] // 2),
                      whole_img.shape[1]))[1])
    bottom = int(sorted(
        (0, sample_region[3] * whole_img.shape[1] + (dc_shape[1] // 2),
         whole_img.shape[1]))[1])
    masked_img[left:right, top:bottom].fill(0)
    masked_img[0:(dc_shape[0] // 2), :].fill(0)
    masked_img[:, 0:(dc_shape[1] // 2)].fill(0)
    masked_img[masked_img.shape[0] -
               (dc_shape[0] // 2):masked_img.shape[0], :].fill(0)
    masked_img[:, masked_img.shape[1] -
               (dc_shape[1] // 2):masked_img.shape[1]].fill(0)

    # Find indices of edge pixels
    occurrences_indices = numpy.where(masked_img == 255)
    X = numpy.matrix(occurrences_indices[0]).T
    Y = numpy.matrix(occurrences_indices[1]).T
    occurrences = numpy.hstack([X, Y])

    # If there is such a pixel outside of the sample region and there is enough
    # space according to dc_shape, use the masked image and calculate the anchor
    # region roi
    if len(occurrences) > 0:
        # Enough space outside of the sample region
        anchor_roi = ((occurrences[0, 0] -
                       (dc_shape[0] / 2)) / whole_img.shape[0],
                      (occurrences[0, 1] -
                       (dc_shape[1] / 2)) / whole_img.shape[1],
                      (occurrences[0, 0] +
                       (dc_shape[0] / 2)) / whole_img.shape[0],
                      (occurrences[0, 1] +
                       (dc_shape[1] / 2)) / whole_img.shape[1])

    else:
        # Not enough space outside of the sample region
        # Pick a random pixel
        cannied_img = cv2.Canny(uint8_img, 100, 200)
        # Find indices of edge pixels
        occurrences_indices = numpy.where(cannied_img == 255)
        X = numpy.matrix(occurrences_indices[0]).T
        Y = numpy.matrix(occurrences_indices[1]).T
        occurrences = numpy.hstack([X, Y])
        anchor_roi = ((occurrences[0, 0] -
                       (dc_shape[0] / 2)) / whole_img.shape[0],
                      (occurrences[0, 1] -
                       (dc_shape[1] / 2)) / whole_img.shape[1],
                      (occurrences[0, 0] +
                       (dc_shape[0] / 2)) / whole_img.shape[0],
                      (occurrences[0, 1] +
                       (dc_shape[1] / 2)) / whole_img.shape[1])

    return anchor_roi