def convert_tiff_to_mat(Working_Directory, img_size_x, img_size_y, img_size_crop_x1,img_size_crop_x2, img_size_crop_y1, img_size_crop_y2,\
median_filter_threshold, text_file):
    #Get names of all tiff files in the directory
    onlyfiles = [ f for f in os.listdir(Working_Directory)\
    if (os.path.isfile(os.path.join(Working_Directory, f)) and f.find('.tif')>0 and f.find('T=')>=0)]
    
    Working_Directory2 = Working_Directory.replace('/Thresholded_OB','')        
    
    
    for lst in xrange(1,np.size(onlyfiles, axis=0)+1):
        tif1 = TIFF.open(Working_Directory+'T='+str(lst)+'.tif', mode='r') #Open multitiff 
        tif2 = TIFF.open(Working_Directory2+'T='+str(lst)+'.tif', mode='r')   #Open non thresholded image for template
         #Initialize data matrix based on number of planes in the multitiff        
        if lst==1:            
            count_z = 0
            for image in tif1.iter_images():
                count_z = count_z + 1           
            data_filtered = np.zeros((img_size_x-(img_size_crop_x1+img_size_crop_x2), img_size_y-(img_size_crop_y1+img_size_crop_y2), count_z,np.size(onlyfiles,0)), dtype=np.uint8)
            data = np.zeros((img_size_x-(img_size_crop_x1+img_size_crop_x2), img_size_y-(img_size_crop_y1+img_size_crop_y2), count_z,np.size(onlyfiles,0)), dtype=np.uint8)

        
        data_filtered, count_z = get_tif_images_filtered(data_filtered, lst, onlyfiles, text_file, tif1, \
        img_size_x, img_size_y, img_size_crop_x1,img_size_crop_x2, img_size_crop_y1, \
        img_size_crop_y2,median_filter_threshold)
        
        data = get_tif_images_raw(data, lst, onlyfiles, text_file, tif2, \
        img_size_x, img_size_y, img_size_crop_x1,img_size_crop_x2, img_size_crop_y1, \
        img_size_crop_y2)
        
    return data, data_filtered, count_z
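A hypothetical call sketch for the converter above. The directory, image sizes, crop margins and threshold are placeholders, and the helpers get_tif_images_filtered / get_tif_images_raw are assumed to be defined elsewhere in the original module:

with open('conversion_log.txt', 'w') as text_file:
    data, data_filtered, count_z = convert_tiff_to_mat(
        '/data/fish1/Thresholded_OB/', 512, 512, 10, 10, 10, 10,
        median_filter_threshold=50, text_file=text_file)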
Example #2
    def get_data(self):
        filenames = self.get_filesindirectory(self.get_prefix(), self.get_extension())
        try:
            from libtiff import TIFF

            im = TIFF.open(filenames[0])
            v0 = im.read_image()
            x = NXfield(range(v0.shape[1]), dtype=np.uint16, name="x")
            y = NXfield(range(v0.shape[0]), dtype=np.uint16, name="y")
            z = NXfield(range(1, len(filenames) + 1), dtype=np.uint16, name="z")
            v = NXfield(np.zeros(shape=(len(filenames), v0.shape[0], v0.shape[1]), dtype=v0.dtype), name="v")
            v[0] = v0
            for i in range(1, len(filenames)):
                im = TIFF.open(filenames[i])
                v[i] = im.read_image()
        except ImportError:
            im = Image.open(filenames[0])
            dtype = np.dtype(np.uint16)
            if im.mode == "I;32" or im.mode == "I":
                dtype = np.dtype(np.uint32)
            x = NXfield(range(im.size[0]), dtype=np.uint16, name="x")
            y = NXfield(range(im.size[1]), dtype=np.uint16, name="y")
            z = NXfield(range(1, len(filenames) + 1), dtype=np.uint16, name="z")
            v = NXfield(np.zeros(shape=(len(filenames), im.size[1], im.size[0]), dtype=dtype), name="v")
            v[0] = np.array(im.getdata(), dtype=dtype).reshape(im.size[1], im.size[0])
            for i in range(1, len(filenames)):
                im = Image.open(filenames[i])
                v[i] = np.array(im.getdata(), dtype=dtype).reshape(im.size[1], im.size[0])
        return NXentry(NXdata(v, (z, y, x)))
Example #3
def downsample_tif(filename_in, filename_out):

	input_tiff = TIFF.open(filename_in, mode = "r")
	output_tiff = TIFF.open(filename_out, mode = "w")

	counter = 0
	for img in input_tiff.iter_images():
		img = img[::2, ::2]
		output_tiff.write_image(img)
		counter += 1

		if counter % 100 == 0:
			print counter
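A minimal usage sketch for downsample_tif above; both file names are placeholders and the input is assumed to be an existing multi-page TIFF:

downsample_tif('stack_full.tif', 'stack_half.tif')  # every page comes out with both axes halved

Example #4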
def convert_tiff_to_mat(Working_Directory, img_size_x, img_size_y, img_size_crop_x1,img_size_crop_x2, img_size_crop_y1, img_size_crop_y2,\
median_filter_threshold, text_file):
    #Get names of all tiff files in the directory
    onlyfiles = [ f for f in os.listdir(Working_Directory)\
    if (os.path.isfile(os.path.join(Working_Directory, f)) and f.find('.tif')>0 and f.find('T=')>=0)]
    
    Working_Directory2 = Working_Directory     
    
    average_bg_intensity = np.zeros((np.size(onlyfiles, axis=0),1))
    for lst in xrange(1,np.size(onlyfiles, axis=0)+1):
        tif1 = TIFF.open(Working_Directory+'T='+str(lst)+'.tif', mode='r') #Open multitiff 
        tif2 = TIFF.open(Working_Directory2+'T='+str(lst)+'.tif', mode='r')   #Open non thresholded image for template
         #Initialize data matrix based on number of planes in the multitiff        
        if lst==1:            
            count_z = 0
            for image in tif1.iter_images():
                count_z = count_z + 1           
            data_filtered_raw = np.zeros((img_size_x-(img_size_crop_x1+img_size_crop_x2), img_size_y-(img_size_crop_y1+img_size_crop_y2), count_z,np.size(onlyfiles,0)), dtype=np.uint8)
            data_filtered_bgsub = np.zeros((img_size_x-(img_size_crop_x1+img_size_crop_x2), img_size_y-(img_size_crop_y1+img_size_crop_y2), count_z,np.size(onlyfiles,0)), dtype=np.uint8)            
            data = np.zeros((img_size_x-(img_size_crop_x1+img_size_crop_x2), img_size_y-(img_size_crop_y1+img_size_crop_y2), count_z,np.size(onlyfiles,0)), dtype=np.uint8)

        
        data_filtered_raw, data_filtered_bgsub, count_z, average_bg_intensity[lst-1] = get_tif_images_filtered(data_filtered_raw, data_filtered_bgsub, lst, onlyfiles, text_file, tif1, \
        img_size_x, img_size_y, img_size_crop_x1,img_size_crop_x2, img_size_crop_y1, \
        img_size_crop_y2,median_filter_threshold)
        
        data = get_tif_images_raw(data, lst, onlyfiles, text_file, tif2, \
        img_size_x, img_size_y, img_size_crop_x1,img_size_crop_x2, img_size_crop_y1, \
        img_size_crop_y2)
    
    average_bg_intensity = np.array(average_bg_intensity)
    print np.shape(average_bg_intensity)
    plt.plot(average_bg_intensity)
    plt.show()  
    
    ### If intensities are correlated to the Bg intensity, make them zero
    for ii in xrange(0,np.size(data_filtered_raw,0)):
        for jj in xrange(0,np.size(data_filtered_raw,1)):
            for zz in xrange(0, np.size(data_filtered_raw,2)):
                A = copy(data_filtered_raw[ii,jj,zz,:])
                corr_A = np.corrcoef(A,np.squeeze(average_bg_intensity)) 
                
                if corr_A[0,1] > 0.6 and sum(A)!=0:                    
                                       
                    A[:] = 0                    
                    data_filtered_bgsub[ii,jj,zz,:] = A
                    data[ii,jj,zz,:] = A                    
                    
    return data, data_filtered_bgsub, count_z, average_bg_intensity
Example #5
	def load_stack(self):



		filename, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file', self.last_dir, "TIFF (*.tif *.tiff);; All files (*.*)")
		
		if not len(filename):
			return

		self.init_data()
		self.last_dir = os.path.dirname(filename)
		self.image_location = filename
		

		tif = TIFF.open(self.image_location)

		pic = tif.read_image()
		misc.imsave("temp.jpg", pic)
	
		# TODO: wait message
		#msg_box = QtGui.QMessageBox.information(self, "CellECT New Workspace", "Loading a large stack may take time. Press OK to continue.", defaultB = QtGui.QMessageBox.NoButton )
		self.load_metadata_from_tif_info()

		#pic = QtGui.QImage(filename)
		#self.label_preview_img.setPixmap(QtGui.QPixmap.fromImage(pic))
		self.label_preview_img.setPixmap("temp.jpg")
Example #6
def test_write_lzw():
    for itype in [uint8, uint16, uint32, uint64, 
                  int8, int16, int32, int64,
                  float32, float64,
                  complex64, complex128]:
        #image = array([[1,2,3], [4,5,6]], itype)
        image = array([range(10000)], itype)
        #image = array([[0]*14000], itype)
        fn = mktemp('.tif')
        tif = TIFFimage(image)
        tif.write_file(fn, compression='lzw')
        del tif

        #os.system('wc %s; echo %s' % (fn, image.nbytes))

        tif = TIFF.open(fn,'r')
        image2 = tif.read_image()
        tif.close()
        #os.remove(fn)
        atexit.register(os.remove, fn)
        for i in range(image.size):
            if image.flat[i] != image2.flat[i]:
                print `i, image.flat[i-5:i+5].view(dtype=uint8),image2.flat[i-5:i+5].view(dtype=uint8)`
                break

        assert image.dtype==image2.dtype
        assert (image==image2).all()
Example #7
def save_images_to_disk():
    print ("Disk-saving thread active...")
    n = 0
    frameTimeOutputFile = open(planOutputPath + "frameTimes.txt", "w")
    frameTimeOutputFile.write("frameCount\t n\t frameCond\t frameT\t interval\n")
    currdict = im_queue.get()
    while currdict is not None:
        frameTimeOutputFile.write(
            "%i\t %i\t %i\t %s\t %s\n"
            % (int(currdict["frame"]), n, int(currdict["cond"]), currdict["time"], currdict["interval"])
        )
        if save_as_tiff:
            fname = "%s/frame%i.tiff" % (dataOutputPath, int(currdict["frame"]))
            tiff = TIFF.open(fname, mode="w")
            tiff.write_image(currdict["im"])
            tiff.close()

        elif save_as_npz:
            np.savez_compressed("%s/test%d.npz" % (output_path, n), currdict["im"])
        else:
            fname = "%s/frame%i.tiff" % (dataOutputPath, int(currdict["frame"]))
            with open(fname, "wb") as f:
                pkl.dump(currdict, f, protocol=pkl.HIGHEST_PROTOCOL)

        # 		print 'DONE SAVING FRAME: ', currdict['frame'], n #fdict
        n += 1
        currdict = im_queue.get()

    disk_writer_alive = False
    # frameTimeOutputFile.close()
    print ("Disk-saving thread inactive...")
Example #8
 def _write_single_image(full_uri, image_array):
     """
     internally used method to write single tiff image
     """
     tiff = TIFF.open(full_uri, mode='w')
     tiff.write_image(image_array.astype('uint16'), write_rgb=False)
     tiff.close()
Example #9
	def load_info_from_tif(self, filename):

		tif = TIFF.open(filename)
		buf = StringIO.StringIO(tif.info())

		img = tif.read_image()

		for image in tif.iter_images():
			self.num_pages += 1

		print self.num_pages
		self.numx = img.shape[0]
		self.numy = img.shape[1]

			
		line = buf.readline()

		self.memch = 0

		while line:
			self.get_meta_from_line(line)
			line = buf.readline()

		if self.numt ==0:
			self.numt = 1

		if self.numz ==0:
			self.numz =1

		if self.numch == 0:
			self.numch = 1
Example #10
def test_write_read():

    for compression in [None, 'lzw']:
        for itype in [uint8, uint16, uint32, uint64, 
                      int8, int16, int32, int64,
                      float32, float64,
                      complex64, complex128]:
            image = array([[1,2,3], [4,5,6]], itype)
            fn = mktemp('.tif')

            if 0:
                tif = TIFF.open(fn,'w')
                tif.write_image(image, compression=compression)
                tif.close()
            else:
                tif = TIFFimage(image)
                tif.write_file(fn, compression=compression)
                del tif

            tif = TIFFfile(fn)
            data, names = tif.get_samples()
            assert names==['sample0'],`names`
            assert len(data)==1, `len(data)`
            assert image.dtype==data[0].dtype, `image.dtype, data[0].dtype`
            assert (image==data[0]).all()
            
            #os.remove(fn)
            atexit.register(os.remove, fn)
Example #11
    def create_plane(self,roi,sizeX,sizeY,sizeC,description):
        tif_image = TIFF.open(self.filename, 'w')
        im_dtype = np.dtype('uint8')
        image_data = np.zeros((sizeC,sizeY,sizeX),dtype=im_dtype)
        print 'num channels=',sizeC
        for c in range(sizeC):
            if c == 0:
                tif_image.set_description(description)
            channel = self.channels[c]
            imarray = self.mkplane(roi,c)
            print 'imarray shape:',imarray.shape
            
            print("Writing channel:  ", c+1)
            if self.rotation == 0:
                plane = imarray[0,:,:] 
            elif self.rotation == 1:
                plane = np.rot90(imarray[0,:,:],1)
            elif self.rotation == 2:
                plane = np.rot90(imarray[0,:,:],3)
                
            image_data[c,:,:] = plane    
#        tif_image = TIFFimage(image_data,description=description)
#        tif_image.write_file(self.filename,compression='lzw') 
#        del tif_image  
        tif_image.write_image(image_data, compression=self.compression.encode('ascii','ignore'))
        tif_image.close()
Example #12
def image_to_tiff(src_path, des_path):
    from libtiff import TIFF
    image = Image.open(src_path)
    image = image.convert('L')
    tiff_out = TIFF.open(des_path, 'w')
    tiff_out.write_image(image, compression=None, write_rgb=True)
    tiff_out.close()
Example #13
    def __init__(self, filename, process_func=None, dtype=None,
                 as_grey=False):
        self._filename = filename
        self._tiff = TIFF.open(filename)

        self._count = 1
        while not self._tiff.LastDirectory():
            self._count += 1
            self._tiff.ReadDirectory()

        # reset to 0
        self._tiff.SetDirectory(0)

        tmp = self._tiff.read_image()
        if dtype is None:
            self._dtype = tmp.dtype
        else:
            self._dtype = dtype

        self._im_sz = tmp.shape

        self._byte_swap = bool(self._tiff.IsByteSwapped())

        self._validate_process_func(process_func)
        self._as_grey(as_grey, process_func)
Example #14
 def __init__(self, filename):
     self.filename = filename
     self.tiff = TIFF.open(filename)
     self._count = self._count_frames()  # used once by TiffStack
     self._shape = self.tiff.read_image().shape  # used once by TiffStack
     self.end = False
     self.generator = self.tiff.iter_images()
Example #15
def save_images_to_disk():
	print('Disk-saving thread active...')
	n = 0
	frameTimeOutputFile = open(planOutputPath+'frameTimes.txt','w')
	frameTimeOutputFile.write('frameCount\t n\t frameCond\t frameT\t interval\n')
	currdict = im_queue.get()
	while currdict is not None:
		frameTimeOutputFile.write('%i\t %i\t %i\t %s\t %s\n' % (int(currdict['frame']),n,int(currdict['cond']),currdict['time'],currdict['interval']))
		if save_as_tiff:
			fname = '%s/frame%i.tiff' % (dataOutputPath,int(currdict['frame']))
			tiff = TIFF.open(fname, mode='w')
			tiff.write_image(currdict['im'])
			tiff.close()
	
		elif save_as_npz:
			np.savez_compressed('%s/test%d.npz' % (output_path, n), currdict['im'])
		else:
			fname = '%s/frame%i.tiff' % (dataOutputPath,int(currdict['frame']),)
			with open(fname, 'wb') as f:
				pkl.dump(currdict, f, protocol=pkl.HIGHEST_PROTOCOL)
			
#		print 'DONE SAVING FRAME: ', currdict['frame'], n #fdict
		n += 1
		currdict = im_queue.get()
		
	disk_writer_alive = False
	#frameTimeOutputFile.close()
	print('Disk-saving thread inactive...')
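A hedged sketch of how a writer loop like save_images_to_disk above is usually driven: the acquisition code pushes frame dictionaries onto im_queue and a None sentinel tells the writer to stop (im_queue, dataOutputPath, save_as_tiff and the other names are module-level globals in the original script):

import threading

writer = threading.Thread(target=save_images_to_disk)
writer.daemon = True
writer.start()
# ... acquisition loop puts frame dicts onto im_queue here ...
im_queue.put(None)   # sentinel: lets the while-loop in the writer exit
writer.join()

Example #16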
def get_list_of_stimulus_name(Working_Directory):
    ## To find num z planes in each trial directory
    num_z_planes = []
    Name_stimulus = list()
    
    Stimulus_Directories = [f for f in os.listdir(Working_Directory) if os.path.isdir(os.path.join(Working_Directory, f)) and f.find('Figures')<0]

    for ii in xrange(0, size(Stimulus_Directories, axis = 0)):
        Trial_Directories = [f for f in os.listdir(os.path.join(Working_Directory, Stimulus_Directories[ii]))\
        if os.path.isdir(os.path.join(Working_Directory, Stimulus_Directories[ii], f)) and f.find('Figures')<0] #Get only directories        
        temp_num_z_planes = zeros((size(Trial_Directories)), dtype=int)    
        
        for jj in xrange(0, size(Trial_Directories, axis = 0)):
            Image_Directory = os.path.join(Working_Directory, Stimulus_Directories[ii], Trial_Directories[jj], 'C=1')+filesep    
            tif = TIFF.open(Image_Directory +'T=1.tif', mode='r') #Open multitiff 
            count = 1        
            for image in tif.iter_images():
                temp_num_z_planes[jj] = count
                count = count+1
        
        num_z_planes.append(temp_num_z_planes)
                  
    for ii in xrange(0, size(Stimulus_Directories, axis = 0)):        
        Trial_Directories = [f for f in os.listdir(os.path.join(Working_Directory, Stimulus_Directories[ii]))\
        if os.path.isdir(os.path.join(Working_Directory, Stimulus_Directories[ii], f)) and f.find('Figures')<0] #Get only directories                
        for jj in xrange(0, size(Trial_Directories, axis = 0)):
            for kk in xrange(0, num_z_planes[ii][jj]):
                name_for_saving_figures = Stimulus_Directories[ii] + ' ' + Trial_Directories[jj] + ' Z=' + str(kk+1)       
                Name_stimulus.append(name_for_saving_figures)
    
    return Name_stimulus
Example #17
 def img_data_write(self, im_data, save_path):
     list_r = []
     list_g = []
     list_b = []
     for frame_data in im_data:
         list_r.append(frame_data[:, :, 0])
         list_g.append(frame_data[:, :, 1])
         list_b.append(frame_data[:, :, 2])
     tiff = TIFF.open(save_path + "r.tiff", "w")
     tiff.write_image(list_r)
     tiff.close()
     tiff = TIFF.open(save_path + "g.tiff", "w")
     tiff.write_image(list_g)
     tiff.close()
     tiff = TIFF.open(save_path + "b.tiff", "w")
     tiff.write_image(list_b)
     tiff.close()
Example #18
def read_2D_image(filename):
    """Read 2D image from filename and return it as a numpy array"""

    f_tiff = TIFF.open(filename)

    im_array = f_tiff.read_image()

    return im_array
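A short usage sketch for read_2D_image above; the path is a placeholder and libtiff's TIFF is assumed to be imported at module level:

arr = read_2D_image('example.tif')
print(arr.shape)

Example #19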
def convert(imagesDir):
	if(os.path.isfile(imagesDir + "/labels.tif")): os.remove(imagesDir + "/labels.tif")
	if(os.path.isfile(imagesDir + "/predictions.tif")): os.remove(imagesDir + "/predictions.tif")
	

	trainNames = sorted(os.listdir(imagesDir), key=natural_key)

	labels = TIFF.open(imagesDir + "/labels.tif", "w")
	predictions = TIFF.open(imagesDir + "/predictions.tif", "w")

	for name in trainNames:
		image = misc.imread(imagesDir + "/" + name + '/labels.png').astype(np.float32)
		image = image / 255
		labels.write_image(image)
		image = misc.imread(imagesDir + "/" + name + '/predictions.png').astype(np.float32)
		image = image / 255
		predictions.write_image(image)
Example #20
def test_write_read():
    for itype in [uint8, uint16, uint32, uint64,
                  int8, int16, int32, int64,
                  float32, float64,
                  complex64, complex128]:
        image = array([[1, 2, 3], [4, 5, 6]], itype)
        fn = mktemp('.tif')
        tif = TIFF.open(fn, 'w')
        tif.write_image(image)
        tif.close()

        tif = TIFF.open(fn, 'r')
        image2 = tif.read_image()
        tif.close()
        os.remove(fn)

        assert image.dtype == image2.dtype
        assert (image == image2).all()
Example #21
    def test_fft(self, dimension, expected):
        input_name = data_path('sinogram-00005.tif')
        output_name = self.tmp_path('r-00000.tif')

        reader = self.get_task('reader', path=input_name)
        writer = self.get_task('writer', filename=self.tmp_path('r-%05i.tif'))
        fft = self.get_task('fft', dimensions=dimension)
        ifft = self.get_task('ifft', dimensions=dimension)

        self.graph.connect_nodes(reader, fft)
        self.graph.connect_nodes(fft, ifft)
        self.graph.connect_nodes(ifft, writer)
        self.sched.run(self.graph)

        ref_img = TIFF.open(input_name, mode='r').read_image()
        res_img = TIFF.open(output_name, mode='r').read_image()
        diff = np.sum(np.abs(ref_img - res_img))
        self.assertLess(diff, expected)
 def generate_augmented_image(self):
     """Generate augmented image with x,y,z points."""
     out_fname = os.path.join(self.segment_me_dir, self.get_filename())
     tif = TIFF.open(out_fname, 'w')
     for z, (cell_wall_im, measurement_im) in enumerate(zip(self.cell_wall_images,
                                             self.measurement_images)):
         point_im = self.get_selected_points_image(z)
         aug_im_rgb = np.array([cell_wall_im, measurement_im, point_im])
         tif.write_image(aug_im_rgb, write_rgb=True)
 def generate_answer_image(self):
     """Generate image augmented with the answer."""
     out_fname = os.path.join(self.answer_dir, self.get_filename())
     tif = TIFF.open(out_fname, 'w')
     for z, (cell_wall_im, measurement_im) in enumerate(zip(self.cell_wall_images,
                                             self.measurement_images)):
         seg_im = self.get_segmentation_outline_image(z)
         aug_im_rgb = np.array([cell_wall_im, measurement_im, seg_im])
         tif.write_image(aug_im_rgb, write_rgb=True)
Example #24
def test_slicing():
    shape = (16, 16)
    image = random.randint(255, size=shape)
    
    for i in range(shape[0]):
        for j in range(shape[1]):
            image1 = image[:i+1,:j+1]
            fn = mktemp('.tif')
            tif = TIFF.open(fn,'w')
            tif.write_image(image1)
            tif.close()
        
            tif = TIFF.open(fn,'r')
            image2 = tif.read_image()
            tif.close()
            
            assert (image1==image2).all(),`i,j`

            os.remove(fn)
def save_multipage_TIFF(input_filenames, output_filename):
    """Read in a list of input filenames and write out as a multi-page TIFF"""
    raise NotImplementedError("Need to update for multiple planes")
    from libtiff import TIFF
    f = TIFF.open(input_filenames[0], 'r')
    first_img = f.read_image()
    f.close()

    output_array = np.empty(
        [first_img.shape[0], first_img.shape[1], len(input_filenames)],
        dtype=first_img.dtype)
    for idx, filename in enumerate(input_filenames):
        f = TIFF.open(filename, 'r')
        output_array[:, :, idx] = f.read_image()
        f.close()

    f = TIFF.open(output_filename, 'w')
    f.write_image(output_array)
    f.close()
Example #26
 def _iter_pages(self):
     if libtiff_available:
         tiff = TIFF.open(self._path, 'r')
         for frame in tiff.iter_images():
             yield frame.astype(float)
     else:
         for frame in self.stack.pages:
             yield frame.asarray(colormapped=False)
     if libtiff_available:
         tiff.close()
Example #27
def readtiff(path):
    frames = []
    tif = TIFF.open(path, mode='r')

    try:
        for cc, tframe in enumerate(tif.iter_images()):
            frames.append(tframe)
    except EOFError:
        pass
    return frames
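A minimal usage sketch for readtiff above; the path is a placeholder pointing at a multi-page TIFF:

frames = readtiff('movie.tif')
print(len(frames))   # number of pages read

Example #28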
def plot_colormaps_each(maps, Working_Directory, name_for_saving_figures, pp, matched_pixels, unique_clrs):
    
    Trial_Directories = [f for f in os.listdir(os.path.join(Working_Directory)) if os.path.isdir(os.path.join(Working_Directory, f)) and f.find('Figures')<0 and f.find('DataFrames')<0] #Get only directories
    
    ## To find num z planes in each trial directory
    num_z_planes = np.zeros((np.size(Trial_Directories)), dtype=np.int)
    for jj in xrange(0, np.size(Trial_Directories, axis = 0)):
        Image_Directory = os.path.join(Working_Directory, Trial_Directories[jj],'C=1')+filesep    
        tif = TIFF.open(Image_Directory +'T=1.tif', mode='r') #Open multitiff 
        count = 1        
        for image in tif.iter_images():
            num_z_planes[jj] = count
            count = count+1
    
    count = 0     
    count_trial1 = 0
    for ii in xrange(0, np.size(Trial_Directories, axis = 0)):       
        count_subplot = 1
        for jj in xrange(0, num_z_planes[ii]):
            name_for_saving_figures1 = name_for_saving_figures + ' ' + Trial_Directories[ii] + ' Z=' + str(jj+1)
            with sns.axes_style("darkgrid"):           
                fig2 = plt.subplot(2,2,count_subplot)
                plt.imshow(maps[:,:,count,:])
                plt.title(name_for_saving_figures1)
                plt.axis('off')
            count = count+1
            count_subplot = count_subplot + 1
            
            # If 4 panels have been plotted, save and start a new figure
            if count_subplot == 5:
                fig2 = plt.gcf()
                pp.savefig(fig2)
                plt.close()
                count_subplot = 1
                    
        #Plot boxplots for each trial
        if count_subplot <= 4:
            with sns.axes_style("darkgrid"):
                fig2 = plt.subplot(2,2,count_subplot)
                fig2 = plot_boxplot(fig2, matched_pixels[:,count_trial1:count_trial1+num_z_planes[ii]], unique_clrs)
#                plt.tight_layout()            
                fig2 = plt.gcf()
                pp.savefig(fig2)
                plt.close()
            count_trial1 = count_trial1 + num_z_planes[ii]
            
        else:
            with sns.axes_style("darkgrid"):
                fig3 = plt.figure()
                fig3 = plot_boxplot(fig3, matched_pixels[:,count_trial1:count_trial1+num_z_planes[ii]], unique_clrs)
#                plt.tight_layout()            
                fig3 = plt.gcf()
                pp.savefig(fig3)
                plt.close()
            count_trial1 = count_trial1 + num_z_planes[ii]
Example #29
def writeData(data, outputFilename):
    """
    Writes data to a tiff, hdf5, or npy file.

    Parameters
    ----------
    data : 3D numpy array
        The data to be written. Must have 3 dimensions, i.e. data.ndim == 3
    outputFilename : string
        The absolute or relative location of the particular file to be read
        in. outputFilename must end in one of the following extensions
        ['.tif', '.tiff', '.hdf5', '.h5', '.npy'].

    Notes
    -----
    - Data to be saved must be a 3D array.

    """

    assert data.ndim==3, "Can only write out 3D hdf5, tiff, and numpy files"
    filename = outputFilename.rstrip('/')
    basePath, fName = os.path.split(filename)
    name, ext = os.path.splitext(fName)
    if basePath and not os.path.exists(basePath):
        raise IOError, "Directory does not exist: %s" % (basePath)

    if ext.lower() in ['.npy']:
        try:
            np.save(filename, np.array(data,dtype=np.float32))
        except IOError:
            raise IOError, "Error writing npy data to: \"%s\"" % filename

    elif ext.lower() in ['.h5', '.hdf5']:
        from h5py import File
        try:
            h5File = File(filename, "w")
        except IOError:
            raise IOError, "Error creating writable hdf5 file at: \"%s\"" % filename

        shp = data.shape
        comp="gzip"
        compOpts=1
        dset = h5File.create_dataset("/raw", shp, np.float32, data, chunks=shp, compression=comp, compression_opts=compOpts)

    elif ext.lower() in ['.tif', '.tiff']:
        from libtiff import TIFF
        try:
            tiff = TIFF.open(filename, 'w')
            tiff.write_image(np.array(data,dtype=np.float32))
        except IOError:
            raise IOError, "Error writing tif file at: \"%s\"" % filename
        tiff.close()

    else:
        assert False, "Can only write out 3D hdf5, tiff, and numpy files"
Example #30
def write_libtiff(file_name, data):
    """Write a TIFF file using pylibtiff. Return the written file name."""
    from libtiff import TIFF

    tiff_file = TIFF.open(file_name, "w")
    try:
        tiff_file.write_image(data)
    finally:
        tiff_file.close()

    return file_name
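A small round-trip sketch for write_libtiff above; the array contents and file name are illustrative only:

import numpy as np
from libtiff import TIFF

data = np.arange(16, dtype=np.uint8).reshape(4, 4)
fname = write_libtiff('roundtrip.tif', data)
tif = TIFF.open(fname, 'r')
assert (tif.read_image() == data).all()
tif.close()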
Example #31
def save_tiff(arr: np.ndarray, file_path: str) -> None:
    temp_img = TIFF.open(file_path, 'w')
    temp_img.write_image(arr)
Example #32
                            metavar="/path/to/.pb",
                            help='Directory of the PB dataset(from export model)')
    parser.add_argument('--TEST_COLOR_URL', required=False,
                            metavar="/path/to/color.xls",
                            help='Directory of the brain region color')
    args = parser.parse_args()
    print(args.TEST_IMAGE_PATH)

    config.TEST_IMAGE_PATH = args.TEST_IMAGE_PATH
    config.TEST_OUT_PATH = args.TEST_OUT_PATH
    config.TEST_PB_PATH = args.TEST_PB_PATH
    config.TEST_COLOR_URL = args.TEST_COLOR_URL

    MODEL = birdsModel()

    open_tiff = TIFF.open(config.TEST_IMAGE_PATH)

    out_org_url = config.TEST_OUT_PATH+'out_org.tif'
    out_org_tiff = TIFF.open(out_org_url,mode='w')
    out_line_url = config.TEST_OUT_PATH+'out_line.tif'
    out_line_tiff = TIFF.open(out_line_url,mode='w')
    out_color_url = config.TEST_OUT_PATH+'out_color.tif'
    out_color_tiff = TIFF.open(out_color_url,mode='w')
    line_stack = []
    color_stack = []
    for img in list(open_tiff.iter_images()):
        print(img.shape)
        org_image = MODEL.run(img)
        out_org_tiff.write_image(org_image,compression = None, write_rgb = True)
        line_image = np.array(org_image)
        color_image = np.array(org_image)
Example #33
    def test_save_dataset_palette(self):
        """Test writer operation as palette."""
        import os
        import numpy as np
        from libtiff import TIFF
        from satpy.writers.mitiff import MITIFFWriter

        expected = np.full((100, 200), 0)

        exp_c = ([
            0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        ], [
            1, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        ], [
            2, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        ])

        color_map = [[0, 3], [1, 4], [2, 5]]
        pal_desc = ['test', 'test2']
        unit = "Test"

        dataset = self._get_test_one_dataset()
        palette = {
            'palette': True,
            'palette_color_map': color_map,
            'palette_description': pal_desc,
            'palette_unit': unit,
            'palette_channel_name': dataset.attrs['name']
        }

        w = MITIFFWriter(base_dir=self.base_dir)
        w.save_dataset(dataset, **palette)
        filename = "{:s}_{:%Y%m%d_%H%M%S}.mitiff".format(
            dataset.attrs['name'], dataset.attrs['start_time'])
        tif = TIFF.open(os.path.join(self.base_dir, filename))
        # Need to check PHOTOMETRIC is 3, ie palette
        self.assertEqual(tif.GetField('PHOTOMETRIC'), 3)
        colormap = tif.GetField('COLORMAP')
        # Check the colormap of the palette image
        self.assertEqual(colormap, exp_c)
        IMAGEDESCRIPTION = 270
        imgdesc = (tif.GetField(IMAGEDESCRIPTION)).decode('utf-8').split('\n')
        found_color_info = False
        unit_name_found = False
        name_length_found = False
        name_length = 0
        names = []
        unit_name = None
        for key in imgdesc:
            if name_length_found and name_length > len(names):
                names.append(key)
                continue
            elif unit_name_found:
                name_length = int(key)
                name_length_found = True
                unit_name_found = False
            elif found_color_info:
                unit_name = key
                unit_name_found = True
                found_color_info = False
            elif 'COLOR INFO:' in key:
                found_color_info = True
        # Check the name of the palette description
        self.assertEqual(name_length, 2)
        # Check the name and unit name of the palette
        self.assertEqual(unit_name, ' Test')
        # Check the palette description of the palette
        self.assertEqual(names, [' test', ' test2'])
        for image in tif.iter_images():
            np.testing.assert_allclose(image, expected, atol=1.e-6, rtol=0)
Example #34
def dump_to_tiff(ndarray, filename):
    with closing(TIFF.open(filename, mode="w")) as fo:
        fo.write_image(ndarray)
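A brief usage sketch for dump_to_tiff above; contextlib.closing and libtiff's TIFF are assumed to be imported in the original module, and the file name is a placeholder:

import numpy as np

dump_to_tiff(np.zeros((8, 8), dtype=np.uint16), 'zeros.tif')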
Example #35
 def _count_frames(self):
     return len([1 for _ in TIFF.open(self.filename).iter_images()])
Example #36
    def test_save_dataset_with_calibration(self):
        """Test writer operation with calibration."""
        import os
        import numpy as np
        from libtiff import TIFF
        from satpy.writers.mitiff import MITIFFWriter

        expected_ir = np.full((100, 200), 255)
        expected_vis = np.full((100, 200), 0)
        expected = np.stack([
            expected_vis, expected_vis, expected_ir, expected_ir, expected_ir,
            expected_vis
        ])
        expected_key_channel = [
            'Table_calibration: 1-VIS0.63, Reflectance(Albedo), [%], 8, [ 0.00 0.39 0.78 1.18 1.57 '
            '1.96 2.35 2.75 3.14 3.53 3.92 4.31 4.71 5.10 5.49 5.88 6.27 6.67 7.06 7.45 7.84 8.24 '
            '8.63 9.02 9.41 9.80 10.20 10.59 10.98 11.37 11.76 12.16 12.55 12.94 13.33 13.73 14.12 '
            '14.51 14.90 15.29 15.69 16.08 16.47 16.86 17.25 17.65 18.04 18.43 18.82 19.22 19.61 '
            '20.00 20.39 20.78 21.18 21.57 21.96 22.35 22.75 23.14 23.53 23.92 24.31 24.71 25.10 '
            '25.49 25.88 26.27 26.67 27.06 27.45 27.84 28.24 28.63 29.02 29.41 29.80 30.20 30.59 '
            '30.98 31.37 31.76 32.16 32.55 32.94 33.33 33.73 34.12 34.51 34.90 35.29 35.69 36.08 '
            '36.47 36.86 37.25 37.65 38.04 38.43 38.82 39.22 39.61 40.00 40.39 40.78 41.18 41.57 '
            '41.96 42.35 42.75 43.14 43.53 43.92 44.31 44.71 45.10 45.49 45.88 46.27 46.67 47.06 '
            '47.45 47.84 48.24 48.63 49.02 49.41 49.80 50.20 50.59 50.98 51.37 51.76 52.16 52.55 '
            '52.94 53.33 53.73 54.12 54.51 54.90 55.29 55.69 56.08 56.47 56.86 57.25 57.65 58.04 '
            '58.43 58.82 59.22 59.61 60.00 60.39 60.78 61.18 61.57 61.96 62.35 62.75 63.14 63.53 '
            '63.92 64.31 64.71 65.10 65.49 65.88 66.27 66.67 67.06 67.45 67.84 68.24 68.63 69.02 '
            '69.41 69.80 70.20 70.59 70.98 71.37 71.76 72.16 72.55 72.94 73.33 73.73 74.12 74.51 '
            '74.90 75.29 75.69 76.08 76.47 76.86 77.25 77.65 78.04 78.43 78.82 79.22 79.61 80.00 '
            '80.39 80.78 81.18 81.57 81.96 82.35 82.75 83.14 83.53 83.92 84.31 84.71 85.10 85.49 '
            '85.88 86.27 86.67 87.06 87.45 87.84 88.24 88.63 89.02 89.41 89.80 90.20 90.59 90.98 '
            '91.37 91.76 92.16 92.55 92.94 93.33 93.73 94.12 94.51 94.90 95.29 95.69 96.08 96.47 '
            '96.86 97.25 97.65 98.04 98.43 98.82 99.22 99.61 100.00 ]',
            'Table_calibration: 2-VIS0.86, Reflectance(Albedo), [%], 8, [ 0.00 0.39 0.78 1.18 1.57 '
            '1.96 2.35 2.75 3.14 3.53 3.92 4.31 4.71 5.10 5.49 5.88 6.27 6.67 7.06 7.45 7.84 8.24 '
            '8.63 9.02 9.41 9.80 10.20 10.59 10.98 11.37 11.76 12.16 12.55 12.94 13.33 13.73 14.12 '
            '14.51 14.90 15.29 15.69 16.08 16.47 16.86 17.25 17.65 18.04 18.43 18.82 19.22 19.61 '
            '20.00 20.39 20.78 21.18 21.57 21.96 22.35 22.75 23.14 23.53 23.92 24.31 24.71 25.10 '
            '25.49 25.88 26.27 26.67 27.06 27.45 27.84 28.24 28.63 29.02 29.41 29.80 30.20 30.59 '
            '30.98 31.37 31.76 32.16 32.55 32.94 33.33 33.73 34.12 34.51 34.90 35.29 35.69 36.08 '
            '36.47 36.86 37.25 37.65 38.04 38.43 38.82 39.22 39.61 40.00 40.39 40.78 41.18 41.57 '
            '41.96 42.35 42.75 43.14 43.53 43.92 44.31 44.71 45.10 45.49 45.88 46.27 46.67 47.06 '
            '47.45 47.84 48.24 48.63 49.02 49.41 49.80 50.20 50.59 50.98 51.37 51.76 52.16 52.55 '
            '52.94 53.33 53.73 54.12 54.51 54.90 55.29 55.69 56.08 56.47 56.86 57.25 57.65 58.04 '
            '58.43 58.82 59.22 59.61 60.00 60.39 60.78 61.18 61.57 61.96 62.35 62.75 63.14 63.53 '
            '63.92 64.31 64.71 65.10 65.49 65.88 66.27 66.67 67.06 67.45 67.84 68.24 68.63 69.02 '
            '69.41 69.80 70.20 70.59 70.98 71.37 71.76 72.16 72.55 72.94 73.33 73.73 74.12 74.51 '
            '74.90 75.29 75.69 76.08 76.47 76.86 77.25 77.65 78.04 78.43 78.82 79.22 79.61 80.00 '
            '80.39 80.78 81.18 81.57 81.96 82.35 82.75 83.14 83.53 83.92 84.31 84.71 85.10 85.49 '
            '85.88 86.27 86.67 87.06 87.45 87.84 88.24 88.63 89.02 89.41 89.80 90.20 90.59 90.98 '
            '91.37 91.76 92.16 92.55 92.94 93.33 93.73 94.12 94.51 94.90 95.29 95.69 96.08 96.47 '
            '96.86 97.25 97.65 98.04 98.43 98.82 99.22 99.61 100.00 ]',
            u'Table_calibration: 3(3B)-IR3.7, BT, °[C], 8, [ 50.00 49.22 48.43 47.65 46.86 46.08 '
            '45.29 44.51 43.73 42.94 42.16 41.37 40.59 39.80 39.02 38.24 37.45 36.67 35.88 35.10 '
            '34.31 33.53 32.75 31.96 31.18 30.39 29.61 28.82 28.04 27.25 26.47 25.69 24.90 24.12 '
            '23.33 22.55 21.76 20.98 20.20 19.41 18.63 17.84 17.06 16.27 15.49 14.71 13.92 13.14 '
            '12.35 11.57 10.78 10.00 9.22 8.43 7.65 6.86 6.08 5.29 4.51 3.73 2.94 2.16 1.37 0.59 '
            '-0.20 -0.98 -1.76 -2.55 -3.33 -4.12 -4.90 -5.69 -6.47 -7.25 -8.04 -8.82 -9.61 -10.39 '
            '-11.18 -11.96 -12.75 -13.53 -14.31 -15.10 -15.88 -16.67 -17.45 -18.24 -19.02 -19.80 '
            '-20.59 -21.37 -22.16 -22.94 -23.73 -24.51 -25.29 -26.08 -26.86 -27.65 -28.43 -29.22 '
            '-30.00 -30.78 -31.57 -32.35 -33.14 -33.92 -34.71 -35.49 -36.27 -37.06 -37.84 -38.63 '
            '-39.41 -40.20 -40.98 -41.76 -42.55 -43.33 -44.12 -44.90 -45.69 -46.47 -47.25 -48.04 '
            '-48.82 -49.61 -50.39 -51.18 -51.96 -52.75 -53.53 -54.31 -55.10 -55.88 -56.67 -57.45 '
            '-58.24 -59.02 -59.80 -60.59 -61.37 -62.16 -62.94 -63.73 -64.51 -65.29 -66.08 -66.86 '
            '-67.65 -68.43 -69.22 -70.00 -70.78 -71.57 -72.35 -73.14 -73.92 -74.71 -75.49 -76.27 '
            '-77.06 -77.84 -78.63 -79.41 -80.20 -80.98 -81.76 -82.55 -83.33 -84.12 -84.90 -85.69 '
            '-86.47 -87.25 -88.04 -88.82 -89.61 -90.39 -91.18 -91.96 -92.75 -93.53 -94.31 -95.10 '
            '-95.88 -96.67 -97.45 -98.24 -99.02 -99.80 -100.59 -101.37 -102.16 -102.94 -103.73 '
            '-104.51 -105.29 -106.08 -106.86 -107.65 -108.43 -109.22 -110.00 -110.78 -111.57 '
            '-112.35 -113.14 -113.92 -114.71 -115.49 -116.27 -117.06 -117.84 -118.63 -119.41 '
            '-120.20 -120.98 -121.76 -122.55 -123.33 -124.12 -124.90 -125.69 -126.47 -127.25 '
            '-128.04 -128.82 -129.61 -130.39 -131.18 -131.96 -132.75 -133.53 -134.31 -135.10 '
            '-135.88 -136.67 -137.45 -138.24 -139.02 -139.80 -140.59 -141.37 -142.16 -142.94 '
            '-143.73 -144.51 -145.29 -146.08 -146.86 -147.65 -148.43 -149.22 -150.00 ]',
            u'Table_calibration: 4-IR10.8, BT, °[C], 8, [ 50.00 49.22 48.43 47.65 46.86 46.08 '
            '45.29 '
            '44.51 43.73 42.94 42.16 41.37 40.59 39.80 39.02 38.24 37.45 36.67 35.88 35.10 34.31 '
            '33.53 32.75 31.96 31.18 30.39 29.61 28.82 28.04 27.25 26.47 25.69 24.90 24.12 23.33 '
            '22.55 21.76 20.98 20.20 19.41 18.63 17.84 17.06 16.27 15.49 14.71 13.92 13.14 12.35 '
            '11.57 10.78 10.00 9.22 8.43 7.65 6.86 6.08 5.29 4.51 3.73 2.94 2.16 1.37 0.59 -0.20 '
            '-0.98 -1.76 -2.55 -3.33 -4.12 -4.90 -5.69 -6.47 -7.25 -8.04 -8.82 -9.61 -10.39 -11.18 '
            '-11.96 -12.75 -13.53 -14.31 -15.10 -15.88 -16.67 -17.45 -18.24 -19.02 -19.80 -20.59 '
            '-21.37 -22.16 -22.94 -23.73 -24.51 -25.29 -26.08 -26.86 -27.65 -28.43 -29.22 -30.00 '
            '-30.78 -31.57 -32.35 -33.14 -33.92 -34.71 -35.49 -36.27 -37.06 -37.84 -38.63 -39.41 '
            '-40.20 -40.98 -41.76 -42.55 -43.33 -44.12 -44.90 -45.69 -46.47 -47.25 -48.04 -48.82 '
            '-49.61 -50.39 -51.18 -51.96 -52.75 -53.53 -54.31 -55.10 -55.88 -56.67 -57.45 -58.24 '
            '-59.02 -59.80 -60.59 -61.37 -62.16 -62.94 -63.73 -64.51 -65.29 -66.08 -66.86 -67.65 '
            '-68.43 -69.22 -70.00 -70.78 -71.57 -72.35 -73.14 -73.92 -74.71 -75.49 -76.27 -77.06 '
            '-77.84 -78.63 -79.41 -80.20 -80.98 -81.76 -82.55 -83.33 -84.12 -84.90 -85.69 -86.47 '
            '-87.25 -88.04 -88.82 -89.61 -90.39 -91.18 -91.96 -92.75 -93.53 -94.31 -95.10 -95.88 '
            '-96.67 -97.45 -98.24 -99.02 -99.80 -100.59 -101.37 -102.16 -102.94 -103.73 -104.51 '
            '-105.29 -106.08 -106.86 -107.65 -108.43 -109.22 -110.00 -110.78 -111.57 -112.35 '
            '-113.14 -113.92 -114.71 -115.49 -116.27 -117.06 -117.84 -118.63 -119.41 -120.20 '
            '-120.98 -121.76 -122.55 -123.33 -124.12 -124.90 -125.69 -126.47 -127.25 -128.04 '
            '-128.82 -129.61 -130.39 -131.18 -131.96 -132.75 -133.53 -134.31 -135.10 -135.88 '
            '-136.67 -137.45 -138.24 -139.02 -139.80 -140.59 -141.37 -142.16 -142.94 -143.73 '
            '-144.51 -145.29 -146.08 -146.86 -147.65 -148.43 -149.22 -150.00 ]',
            u'Table_calibration: 5-IR11.5, BT, °[C], 8, [ 50.00 49.22 48.43 47.65 46.86 46.08 '
            '45.29 '
            '44.51 43.73 42.94 42.16 41.37 40.59 39.80 39.02 38.24 37.45 36.67 35.88 35.10 34.31 '
            '33.53 32.75 31.96 31.18 30.39 29.61 28.82 28.04 27.25 26.47 25.69 24.90 24.12 23.33 '
            '22.55 21.76 20.98 20.20 19.41 18.63 17.84 17.06 16.27 15.49 14.71 13.92 13.14 12.35 '
            '11.57 10.78 10.00 9.22 8.43 7.65 6.86 6.08 5.29 4.51 3.73 2.94 2.16 1.37 0.59 -0.20 '
            '-0.98 -1.76 -2.55 -3.33 -4.12 -4.90 -5.69 -6.47 -7.25 -8.04 -8.82 -9.61 -10.39 -11.18 '
            '-11.96 -12.75 -13.53 -14.31 -15.10 -15.88 -16.67 -17.45 -18.24 -19.02 -19.80 -20.59 '
            '-21.37 -22.16 -22.94 -23.73 -24.51 -25.29 -26.08 -26.86 -27.65 -28.43 -29.22 -30.00 '
            '-30.78 -31.57 -32.35 -33.14 -33.92 -34.71 -35.49 -36.27 -37.06 -37.84 -38.63 -39.41 '
            '-40.20 -40.98 -41.76 -42.55 -43.33 -44.12 -44.90 -45.69 -46.47 -47.25 -48.04 -48.82 '
            '-49.61 -50.39 -51.18 -51.96 -52.75 -53.53 -54.31 -55.10 -55.88 -56.67 -57.45 -58.24 '
            '-59.02 -59.80 -60.59 -61.37 -62.16 -62.94 -63.73 -64.51 -65.29 -66.08 -66.86 -67.65 '
            '-68.43 -69.22 -70.00 -70.78 -71.57 -72.35 -73.14 -73.92 -74.71 -75.49 -76.27 -77.06 '
            '-77.84 -78.63 -79.41 -80.20 -80.98 -81.76 -82.55 -83.33 -84.12 -84.90 -85.69 -86.47 '
            '-87.25 -88.04 -88.82 -89.61 -90.39 -91.18 -91.96 -92.75 -93.53 -94.31 -95.10 -95.88 '
            '-96.67 -97.45 -98.24 -99.02 -99.80 -100.59 -101.37 -102.16 -102.94 -103.73 -104.51 '
            '-105.29 -106.08 -106.86 -107.65 -108.43 -109.22 -110.00 -110.78 -111.57 -112.35 '
            '-113.14 -113.92 -114.71 -115.49 -116.27 -117.06 -117.84 -118.63 -119.41 -120.20 '
            '-120.98 -121.76 -122.55 -123.33 -124.12 -124.90 -125.69 -126.47 -127.25 -128.04 '
            '-128.82 -129.61 -130.39 -131.18 -131.96 -132.75 -133.53 -134.31 -135.10 -135.88 '
            '-136.67 -137.45 -138.24 -139.02 -139.80 -140.59 -141.37 -142.16 -142.94 -143.73 '
            '-144.51 -145.29 -146.08 -146.86 -147.65 -148.43 -149.22 -150.00 ]',
            'Table_calibration: 6(3A)-VIS1.6, Reflectance(Albedo), [%], 8, [ 0.00 0.39 0.78 1.18 '
            '1.57 1.96 2.35 2.75 3.14 3.53 3.92 4.31 4.71 5.10 5.49 5.88 6.27 6.67 7.06 7.45 7.84 '
            '8.24 8.63 9.02 9.41 9.80 10.20 10.59 10.98 11.37 11.76 12.16 12.55 12.94 13.33 13.73 '
            '14.12 14.51 14.90 15.29 15.69 16.08 16.47 16.86 17.25 17.65 18.04 18.43 18.82 19.22 '
            '19.61 20.00 20.39 20.78 21.18 21.57 21.96 22.35 22.75 23.14 23.53 23.92 24.31 24.71 '
            '25.10 25.49 25.88 26.27 26.67 27.06 27.45 27.84 28.24 28.63 29.02 29.41 29.80 30.20 '
            '30.59 30.98 31.37 31.76 32.16 32.55 32.94 33.33 33.73 34.12 34.51 34.90 35.29 35.69 '
            '36.08 36.47 36.86 37.25 37.65 38.04 38.43 38.82 39.22 39.61 40.00 40.39 40.78 41.18 '
            '41.57 41.96 42.35 42.75 43.14 43.53 43.92 44.31 44.71 45.10 45.49 45.88 46.27 46.67 '
            '47.06 47.45 47.84 48.24 48.63 49.02 49.41 49.80 50.20 50.59 50.98 51.37 51.76 52.16 '
            '52.55 52.94 53.33 53.73 54.12 54.51 54.90 55.29 55.69 56.08 56.47 56.86 57.25 57.65 '
            '58.04 58.43 58.82 59.22 59.61 60.00 60.39 60.78 61.18 61.57 61.96 62.35 62.75 63.14 '
            '63.53 63.92 64.31 64.71 65.10 65.49 65.88 66.27 66.67 67.06 67.45 67.84 68.24 68.63 '
            '69.02 69.41 69.80 70.20 70.59 70.98 71.37 71.76 72.16 72.55 72.94 73.33 73.73 74.12 '
            '74.51 74.90 75.29 75.69 76.08 76.47 76.86 77.25 77.65 78.04 78.43 78.82 79.22 79.61 '
            '80.00 80.39 80.78 81.18 81.57 81.96 82.35 82.75 83.14 83.53 83.92 84.31 84.71 85.10 '
            '85.49 85.88 86.27 86.67 87.06 87.45 87.84 88.24 88.63 89.02 89.41 89.80 90.20 90.59 '
            '90.98 91.37 91.76 92.16 92.55 92.94 93.33 93.73 94.12 94.51 94.90 95.29 95.69 96.08 '
            '96.47 96.86 97.25 97.65 98.04 98.43 98.82 99.22 99.61 100.00 ]'
        ]
        dataset = self._get_test_dataset_calibration()
        w = MITIFFWriter(
            filename=dataset.attrs['metadata_requirements']['file_pattern'],
            base_dir=self.base_dir)
        w.save_dataset(dataset)
        filename = (
            dataset.attrs['metadata_requirements']['file_pattern']).format(
                start_time=dataset.attrs['start_time'])
        tif = TIFF.open(os.path.join(self.base_dir, filename))
        IMAGEDESCRIPTION = 270
        imgdesc = (tif.GetField(IMAGEDESCRIPTION)).decode('utf-8').split('\n')
        found_table_calibration = False
        number_of_calibrations = 0
        for key in imgdesc:
            if 'Table_calibration' in key:
                found_table_calibration = True
                if '1-VIS0.63' in key:
                    self.assertEqual(key, expected_key_channel[0])
                    number_of_calibrations += 1
                elif '2-VIS0.86' in key:
                    self.assertEqual(key, expected_key_channel[1])
                    number_of_calibrations += 1
                elif '3(3B)-IR3.7' in key:
                    self.assertEqual(key, expected_key_channel[2])
                    number_of_calibrations += 1
                elif '4-IR10.8' in key:
                    self.assertEqual(key, expected_key_channel[3])
                    number_of_calibrations += 1
                elif '5-IR11.5' in key:
                    self.assertEqual(key, expected_key_channel[4])
                    number_of_calibrations += 1
                elif '6(3A)-VIS1.6' in key:
                    self.assertEqual(key, expected_key_channel[5])
                    number_of_calibrations += 1
                else:
                    self.fail(
                        "Not a valid channel description i the given key.")
        self.assertTrue(
            found_table_calibration,
            "Table_calibration is not found in the imagedescription.")
        self.assertEqual(number_of_calibrations, 6)
        for i, image in enumerate(tif.iter_images()):
            np.testing.assert_allclose(image, expected[i], atol=1.e-6, rtol=0)
Example #37
#     with rasterio.open('districtTiffFiles/'+distName+'@'+str(st_cen_cd)+'@'+str(censuscode)+".tif", "w", **out_meta) as dest:
#       dest.write(out_image)
inputFolder = 'districtTiffFiles19/'
onlyfiles = [f for f in listdir(inputFolder) if isfile(join(inputFolder, f))]
flattened_DataDictionary = {}

alldim1 = np.array([])

printing_dictionary = {}

for i in range(13):
    for currDFile in onlyfiles:
        #currDistrictFile='districtTiffFiles/Rajkot@[email protected]'
        currDistrictFile = 'districtTiffFiles19/' + currDFile
        tif = TIFF.open(currDistrictFile, mode='r')
        image = tif.read_image()
        imagenum = np.array(image)
        # break
        if (i < 10):
            dataAll = np.array(image)[:, :, i]
        elif (i == 10):  #ndvi
            dataAll = np.array((image[:, :, 3] - (image)[:, :, 2]) /
                               ((image)[:, :, 3] + (image)[:, :, 2]))
        elif (i == 11):  #ndbi
            dataAll = np.array((image[:, :, 4] - (image)[:, :, 3]) /
                               ((image)[:, :, 3] + (image)[:, :, 4]))
            # dataAll = (np.array(image)[:,:,4]-np.array(image)[:,:,3])/(np.array(image)[:,:,4]+np.array(image)[:,:,3])
        elif (i == 12):  #mndwi
            dataAll = np.array((image[:, :, 1] - (image)[:, :, 4]) /
                               ((image)[:, :, 1] + (image)[:, :, 4]))
Example #38
        # Create the output file:
        # first create the driver, then create the corresponding raster dataset
        driver = gdal.GetDriverByName("GTiff")
        dataset = driver.Create(path, img_width, img_height, num_bands,
                                datatype)
        if dataset is not None:
            if geotrans is not None:
                dataset.SetGeoTransform(geotrans)  # write the affine transform parameters
            if proj is not None:
                if proj in ('WGS84', 'wgs84', 'EPSG:4326', 'EPSG-4326', '4326'):
                    dataset.SetProjection(projection[0])  # write the projection
                elif proj in ('EPSG:3857', 'EPSG-3857', '3857'):
                    dataset.SetProjection(projection[1])  # write the projection
                else:
                    dataset.SetProjection(proj)  # write the projection
            for i in range(num_bands):
                dataset.GetRasterBand(i + 1).WriteArray(bands[i])
        print("save image success.")


#%%
geotrans, proj = readImage('D:\\Data\\MOD09GQ\\h27v05_AllWDays_percent.tiff')

tif = TIFF.open('D:\\Data\\MOD09GQ\\output\\h27v05_before_NIR.tiff', mode='r')
NIR = tif.read_image()
tif = TIFF.open('D:\\Data\\MOD09GQ\\output\\h27v05_before_R.tiff', mode='r')
R = tif.read_image()

geotrans /= np.array([1, 2, 1, 1, 1, 2])
writeImage(np.stack((R, NIR)), 'D:\\Data\\MOD09GQ\\output\\h27v05_before.tiff', \
    geotrans=geotrans, proj=proj)
Example #39
import os
import os.path
from osgeo import gdal
import sys
from libtiff import TIFF
from shutil import copyfile
from datetime import datetime, timedelta
import numpy as np
import time


path="s1_tiles_rejected/S1A_20201126T052448_70E0_colwith_S2A_20201127T102401_T32UPG_13_4.tif"
S1_im=TIFF.open(path)
imarray=S1_im.read_image()
for j in range(len(imarray)):
    for i in range(len(imarray[j])):
      print(i)
      if(imarray[j][i][0]==0 or (imarray[j][i][1]==-32768 and imarray[j][i][2]==-32768 and imarray[j][i][3]==-32768 and imarray[j][i][4]==-32768)):
        print("Alert")

'''
for filename in os.listdir("s1_tiles"):
  print(filename)
  path="s1_tiles/"+filename
  S1_im=TIFF.open(path)
  imarray=S1_im.read_image()
  condition=True
  for j in range(len(imarray)):
    for i in range(len(imarray[j])):
      if(imarray[j][i][0]==0 or (imarray[j][i][1]==-32768 and imarray[j][i][2]==-32768 and imarray[j][i][3]==-32768 and imarray[j][i][4]==-32768)):
        condition=False
Example #40
# H2O.A = 18.01488  # g / mol https://en.wikipedia.org/wiki/Molar_mass

pix_size = 1.24e-6  # m [.setup files]
width = 1920 * pix_size  # m

# Calculating number densities with formular from [Jacobsen, Kirz, Howells]
# Densities from the above cited wiki pages.
# Note these are actually the number density LINE INTEGRALS in
# particles / cm^2.

# H2O.ndens = 1 * Na / H2O.A * width * 100e3  # particles / m^2
Os.ndens = 22.59 * Na / Os.A * width * 100e3  # particles / m^2
U.ndens = 19.1 * Na / U.A * width * 100e3  # particles / m^2

# Reading in image files to calculate "Laplacian Spectrums"
Al_phant = TIFF.open('../Data/Projection_Tifs/Al_1080.tif', 'r').read_image()
No_phant = TIFF.open('../Data/Projection_Tifs/No_1080.tif', 'r').read_image()

# Al_phant = zoom(Al_phant, (1 / 10, 1 / 10), order=0)
# No_phant = zoom(No_phant, (1 / 10, 1 / 10), order=0)

# H2O.phant = Al_phant / Al_phant.mean() * H2O.ndens
Os.phant = No_phant / No_phant.mean() * Os.ndens
U.phant = No_phant / No_phant.mean() * U.ndens

print('\nCalculating laplacians:')
# print('H2O...')
# H2O.lap = laplace(H2O.phant)
print('Os...')
Os.lap = laplace(Os.phant)
print('U...\n')
Example #41
    def _save_datasets_as_mitiff(self, datasets, image_description,
                                 gen_filename, **kwargs):
        """Put all togehter and save as a tiff file with the special tag
           making it a mitiff file.
        """
        from libtiff import TIFF

        tif = TIFF.open(gen_filename, mode='w')

        tif.SetField(IMAGEDESCRIPTION, (image_description).encode('utf-8'))

        cns = self.translate_channel_name.get(kwargs['sensor'], {})
        if isinstance(datasets, list):
            LOG.debug("Saving datasets as list")

            for _cn in self.channel_order[kwargs['sensor']]:
                for dataset in datasets:
                    if dataset.attrs['name'] == _cn:
                        reverse_offset = 0.
                        reverse_scale = 1.
                        if dataset.attrs[
                                'calibration'] == 'brightness_temperature':
                            reverse_offset = 255.
                            reverse_scale = -1.
                            dataset.data += KELVIN_TO_CELSIUS

                        # Need to possibly translate channel names from satpy to mitiff
                        cn = cns.get(dataset.attrs['name'],
                                     dataset.attrs['name'])
                        _data = reverse_offset + reverse_scale * ((
                            (dataset.data - float(self.mitiff_config[
                                kwargs['sensor']][cn]['min-val'])) /
                            (float(self.mitiff_config[kwargs['sensor']][cn][
                                'max-val']) - float(self.mitiff_config[
                                    kwargs['sensor']][cn]['min-val']))) * 255.)
                        data = _data.clip(0, 255)

                        tif.write_image(data.astype(np.uint8),
                                        compression='deflate')
                        break
        elif 'dataset' in datasets.attrs['name']:
            LOG.debug("Saving %s as a dataset.", datasets.attrs['name'])
            for _cn in self.channel_order[kwargs['sensor']]:
                for i, band in enumerate(datasets['bands']):
                    if band == _cn:
                        chn = datasets.sel(bands=band)
                        reverse_offset = 0.
                        reverse_scale = 1.
                        if chn.attrs['prerequisites'][i][
                                4] == 'brightness_temperature':
                            reverse_offset = 255.
                            reverse_scale = -1.
                            chn.data += KELVIN_TO_CELSIUS

                        # Need to possibly translate channel names from satpy to mitiff
                        cn = cns.get(chn.attrs['prerequisites'][i][0],
                                     chn.attrs['prerequisites'][i][0])
                        _data = reverse_offset + reverse_scale * ((
                            (chn.data - float(self.mitiff_config[
                                kwargs['sensor']][cn]['min-val'])) /
                            (float(self.mitiff_config[kwargs['sensor']][cn][
                                'max-val']) - float(self.mitiff_config[
                                    kwargs['sensor']][cn]['min-val']))) * 255.)
                        data = _data.clip(0, 255)

                        tif.write_image(data.astype(np.uint8),
                                        compression='deflate')
                        break

        else:
            LOG.debug("Saving datasets as enhanced image")
            img = get_enhanced_image(datasets.squeeze(), enhance=self.enhancer)
            for i, band in enumerate(img.data['bands']):
                chn = img.data.sel(bands=band)
                data = chn.values * 254. + 1
                data = data.clip(0, 255)
                tif.write_image(data.astype(np.uint8), compression='deflate')

        del tif
Пример #42
0
        counter += 1
    return counter


if __name__ == '__main__':
    import os
    fileNames = os.listdir(RAW_VOLUME_PATH)
    processed = 0
    imageCounter = 0
    labelCounter = 0
    for fileName in fileNames:
        if fileName.split('.')[-1] != 'tif':
            continue
        elif processed < TRAIN_VOLUME_NUM:
            trainRawVolumePath = RAW_VOLUME_PATH + DIR_DIVIDER + fileName
            print('processing ' + trainRawVolumePath)
            trainRawVolume = TIFF.open(trainRawVolumePath)
            imageCounter = splitAndSave(trainRawVolume, TRAIN_IMAGE_PATH,
                                        imageCounter)

            trainMaskVolumePath = MASK_VOLUME_PATH + DIR_DIVIDER + fileName
            print('processing ' + trainMaskVolumePath)
            trainMaskVolume = TIFF.open(trainMaskVolumePath)
            labelCounter = splitAndSave(trainMaskVolume, TRAIN_LABEL_PATH,
                                        labelCounter)
            processed += 1
        # the volume immediately after the last training volume is used as the testing volume
        elif processed == TRAIN_VOLUME_NUM:
            print('processing testing data')
            testRawVolume = TIFF.open(RAW_VOLUME_PATH + DIR_DIVIDER + fileName)
            splitAndSave(testRawVolume, TEST_IMAGE_PATH, 0)
            testMaskVolume = TIFF.open(MASK_VOLUME_PATH + DIR_DIVIDER + fileName)
Пример #43
0
def tiff_to_read(tiff_image_name):
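    """Read every page of a multi-page TIFF into a single (pages, height, width) stack."""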
    tif = TIFF.open(tiff_image_name, mode="r")
    im_stack = list()
    for im in list(tif.iter_images()):
        im_stack.append(im)
    return np.stack(im_stack)
Пример #44
0
def main():
    # Load the model config ----
    out_dir = os.path.join(IN_OUT_ROOT, str(args.model_ind))
    reloaded_config_path = os.path.join(out_dir, "config.pickle")
    print("Loading restarting config from: %s" % reloaded_config_path)
    with open(reloaded_config_path, "rb") as config_f:
        config = pickle.load(config_f)
    assert (config.model_ind == args.model_ind)

    out_sub_dir = os.path.join(out_dir, SUB_DIR)
    if not os.path.exists(out_sub_dir):
        os.makedirs(out_sub_dir)

    print("Model output size: %d" % config.input_sz)

    if config.input_sz == 200:
        rerescale = 1.
    else:
        assert (config.input_sz == 100)
        rerescale = 0.5

    next_index = 0  # blocks
    num_img = 0  # input imgs
    num_gt = 0

    # make colour dict for predictions (gt_k - we render reordered)
    if (config.gt_k == 3):
        colour_map = [
            np.array([175, 28, 12], dtype=np.uint8),
            np.array([111, 138, 155], dtype=np.uint8),
            np.array([81, 188, 0], dtype=np.uint8),
        ]
    else:
        colour_map = [(np.random.rand(3) * 255.).astype(np.uint8)
                      for _ in xrange(config.gt_k)]

    # so it's a random order in forward pass
    save_names = np.random.permutation(NUM_TRAIN)  # order in input_blocks
    save_names_to_orig_pos = {}  # int to (img int, row int, col int)
    input_blocks = np.zeros((NUM_TRAIN, 4, config.input_sz, config.input_sz),
                            dtype=np.uint8)

    for img_path in sorted(glob.glob(SOURCE_IMGS_DIR + "/*.tif")):
        print("on img: %d %s" % (num_img, datetime.now()))
        sys.stdout.flush()
        # each block's image and gt (if exists) share same filenames

        handle = os.path.basename(img_path)[:-len(SOURCE_IMGS_SUFFIX)]
        tif = TIFF.open(img_path, mode="r")
        img = tif.read_image()
        assert (img.shape == (6000, 6000, 4))  # uint8 np array, RGBIR

        # Save a shrunk RGB version of the input image
        shrunk_img = cv2.resize(img,
                                dsize=None,
                                fx=0.5 * rerescale,
                                fy=0.5 * rerescale,
                                interpolation=cv2.INTER_LINEAR)
        Image.fromarray(shrunk_img[:, :, :3]).save(
            os.path.join(out_sub_dir, "%d_img.png" % num_img))

        # Store blocks in randomly shuffled array
        split_imgs(num_img, img, next_index, names=save_names, cut=400,
                   rescale=0.5, rerescale=rerescale, storage=input_blocks, \
                   save_names_to_orig_pos=save_names_to_orig_pos)

        # Get gt image
        gt_path = os.path.join(SOURCE_GT_DIR, handle + SOURCE_GT_SUFFIX)
        if os.path.isfile(gt_path):
            num_gt += 1

            gt_tif = TIFF.open(gt_path, mode="r")
            gt = gt_tif.read_image()
            assert (gt.shape == (6000, 6000, 3))  # uint8 np array, RGB

            filter_gt_and_store(config,
                                num_img,
                                gt,
                                rescale=0.5,
                                rerescale=rerescale,
                                colour_map=colour_map,
                                out_dir=out_sub_dir)

        next_index += OUT_PER_SOURCE
        num_img += 1

        if args.test_code and num_img == NUM_SOURCE_IMGS:
            break

    assert (next_index == NUM_TRAIN)
    assert (num_img == NUM_SOURCE_IMGS)
    assert (num_gt == NUM_SOURCE_GT)

    predict_and_reassemble(config, input_blocks, NUM_SOURCE_IMGS,
                           save_names_to_orig_pos, colour_map, out_sub_dir)
Пример #45
0
            packed_overlap1.astype(np.int32).ravel(),
            packed_overlap2.astype(np.int32).ravel())
        overlap_labels1, overlap_labels2, overlap_areas = counter.get_counts_pair32(
        )

        areacounter = fast64counter.ValueCountInt64()
        areacounter.add_values(packed_overlap1.ravel())
        areacounter.add_values(packed_overlap2.ravel())
        areas = dict(zip(*areacounter.get_counts()))

        if Debug:
            from libtiff import TIFF

            # output full block images
            for image_i in range(block1.shape[2]):
                tif = TIFF.open('block1_z{0:04}.tif'.format(image_i), mode='w')
                tif.write_image(np.uint8(block1[:, :, image_i] * 13 % 251))
                tif = TIFF.open('block2_z{0:04}.tif'.format(image_i), mode='w')
                tif.write_image(np.uint8(block2[:, :, image_i] * 13 % 251))

            #output overlap images
            if single_image_matching:
                tif = TIFF.open('packed_overlap1.tif', mode='w')
                tif.write_image(
                    np.uint8(np.squeeze(packed_overlap1) * 13 % 251))
                tif = TIFF.open('packed_overlap2.tif', mode='w')
                tif.write_image(
                    np.uint8(np.squeeze(packed_overlap2) * 13 % 251))
            else:
                for image_i in range(packed_overlap1.shape[2]):
                    tif = TIFF.open(
Пример #46
0
 def open(cls, filename: Union[str, bytes], mode: str = 'r'):
     return TiffReader(TIFF.open(filename, mode))
Пример #47
0
def get_batch_data():
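	# Assemble one 64-tile batch: read each TIFF, drop channels 11 and 12, append the
	# derived spectral indices (NDVI, NDWI, NDBI, UI, EVI), zero-pad every tile to
	# resizeDim x resizeDim and look up its label in s1 by village code.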
	global train_files,files,s1
	i=0
	X=np.array([]).reshape((0,resizeDim,resizeDim, nchannels))
	Y=[]	
	j=0
	i=0
	k=0
	random.shuffle(train_files)

	for ind in train_files:
		tif = TIFF.open(files[ind], mode='r')
		image = tif.read_image()
		dataAll = np.array(image)
		if(dataAll.shape[0]>resizeDim or dataAll.shape[1]>resizeDim):
			continue

		village_code=int((files[ind].split('@')[3]).split('.')[0])
		val=0
		try:
			try:
				val=int(s1.loc[village_code])
			except:
				continue
		except:
			continue
		data=np.delete(dataAll,[11,12],axis=2)

		band2=data[:,:,1]
		band3=data[:,:,2]
		band4=data[:,:,3]
		band5=data[:,:,4]
		band6=data[:,:,5]
		band7=data[:,:,6]
		sum45=band4+band5
		sum35=band3+band5
		sum56=band5+band6
		sum57=band5+band7
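		# The indices below are band ratios; zero denominators are replaced with 1.0
		# first to avoid division by zero.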
		####ndvi
		sum45[sum45==0.0]=1.0
		ndvi=(band5-band4)/sum45
		####ndwi
		sum35[sum35==0.0]=1.0
		ndwi=(band3-band5)/sum35
		####ndbi
		sum56[sum56==0.0]=1.0
		ndbi=(band6-band5)/sum56
		####ui
		sum57[sum57==0.0]=1.0
		ui=(band7-band5)/sum57
		####evi
		complexDenom=(band5+6*band4-7.5*band2+1.0)
		complexDenom[complexDenom==0.0] = 1.0
		band4Denom= band4.copy()
		band4Denom[band4Denom==0.0]=1.0
		eviHelper=2.5*(band5/band4Denom)
		evi=eviHelper/complexDenom

		combinedData=np.dstack((data,ndvi,ndwi,ndbi,ui,evi))

		left=(resizeDim-combinedData.shape[0])//2
		right=resizeDim-combinedData.shape[0]-left
		up=(resizeDim-combinedData.shape[1])//2
		down=resizeDim-combinedData.shape[1]-up

		data1=np.lib.pad(combinedData,[(left,right),(up,down),(0,0)],'constant')
		data1=np.reshape(data1,(1,resizeDim,resizeDim,nchannels))
		if np.isnan(data1).any():
			continue
		else:
			X=np.vstack((X,data1))
			Y.append(val)

		i+=1
        
		if i%(64)==0:
			X=np.asarray(X,dtype=np.float32)
			Y=np.asarray(Y,dtype=np.int32)-1  # labels in s1 are 1-indexed; shift to 0-indexed
			dataset = (X, Y)
			return dataset
Пример #48
0
def get_eval_train_data():
    global k
    global ind
    global train_files, files, s1
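    # Evaluation batch generator: walks train_files via the module-level index `ind`
    # and yields (X, Y) batches of 64 three-band tiles with one-hot labels.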
    while True:
        X = np.array([]).reshape((0, resizeDim, resizeDim, nchannels))
        Y = np.zeros((batch_size, numclasses))

        while ind < len(train_files):

            ind = (ind + 1) % len(train_files)
            tif = TIFF.open(files[train_files[ind]], mode='r')
            image = tif.read_image()
            dataAll = np.array(image)
            if (dataAll.shape[0] > resizeDim or dataAll.shape[1] > resizeDim):
                continue

            village_code = int(
                (files[train_files[ind]].split('@')[3]).split('.')[0])
            val = 0
            try:
                try:
                    val = int(s1.loc[village_code]) - 1
                except:
                    continue
            except:
                continue
            data = np.delete(dataAll, [11, 12], axis=2)

            band2 = data[:, :, 1]
            band3 = data[:, :, 2]
            band4 = data[:, :, 3]
            band5 = data[:, :, 4]
            band6 = data[:, :, 5]
            band7 = data[:, :, 6]
            sum45 = band4 + band5
            sum35 = band3 + band5
            sum56 = band5 + band6
            sum57 = band5 + band7
            ####ndvi
            sum45[sum45 == 0.0] = 1.0
            ndvi = (band5 - band4) / sum45
            ####ndwi
            sum35[sum35 == 0.0] = 1.0
            ndwi = (band3 - band5) / sum35
            ####ndbi
            sum56[sum56 == 0.0] = 1.0
            ndbi = (band6 - band5) / sum56
            ####ui
            sum57[sum57 == 0.0] = 1.0
            ui = (band7 - band5) / sum57
            ####evi
            complexDenom = (band5 + 6 * band4 - 7.5 * band2 + 1.0)
            complexDenom[complexDenom == 0.0] = 1.0
            band4Denom = band4.copy()
            band4Denom[band4Denom == 0.0] = 1.0
            eviHelper = 2.5 * (band5 / band4Denom)
            evi = eviHelper / complexDenom

            combinedData = np.dstack((band2, band3, band4))

            left = (resizeDim - combinedData.shape[0]) // 2
            right = resizeDim - combinedData.shape[0] - left
            up = (resizeDim - combinedData.shape[1]) // 2
            down = resizeDim - combinedData.shape[1] - up

            data1 = np.lib.pad(combinedData, [(left, right), (up, down),
                                              (0, 0)], 'constant')
            data1 = np.reshape(data1, (1, resizeDim, resizeDim, nchannels))
            if np.isnan(data1).any():
                continue
            else:
                X = np.vstack((X, data1))
                Y[k % 64, val] = 1

            k += 1
            if k % (64) == 0:
                X = np.asarray(X, dtype=np.float32)
                Y = np.asarray(Y, dtype=np.int32)
                dataset = (X, Y)
                print(k)
                yield X, Y
                break
Пример #49
0
        else:

            packed_vol = np.reshape(packed_vol, label_vol.shape)

            print "Cleanup starting with {0} segments.".format(nlabels)

            # Grow labels so there are no boundary pixels
            if has_boundaries:
                for image_i in range(packed_vol.shape[2]):
                    label_image = packed_vol[:,:,image_i]
                    packed_vol[:,:,image_i] = mahotas.cwatershed(np.zeros(label_image.shape, dtype=np.uint32), label_image, return_lines=False)

            if Debug:
                from libtiff import TIFF
                for image_i in range(packed_vol.shape[2]):
                    tif = TIFF.open('preclean_z{0:04}.tif'.format(image_i), mode='w')
                    tif.write_image(np.uint8(packed_vol[:, :, image_i] * 13 % 251))

            # Determine label adjacency and sizes

            borders = np.zeros(packed_vol.shape, dtype=np.bool)

            # Code currently only supports a 3d volume
            assert(packed_vol.ndim == 3)

            with timer.Timer("adjicency matrix construction"):

                full_npix = scipy.sparse.coo_matrix((nlabels, nlabels), dtype=np.uint32)
                full_prob = scipy.sparse.coo_matrix((nlabels, nlabels), dtype=np.float32)

                for axis in range(packed_vol.ndim):
Пример #50
0
import numpy as np
from scipy import misc
from PIL import Image
from libtiff import TIFF
import cv2
from matplotlib import pyplot as plt
import time
import sys

fp = sys.argv[1]
tif = TIFF.open(fp, mode='r')  # open the tiff file for reading
im = tif.read_image()  # read the image and return it as a numpy array
row, col = im.shape[:2]  # the image is (rows, cols, 3); keep only the spatial dimensions
sketch = np.zeros([row, col])

# Convert the original 2-D three-channel RGB image into a labelled 2-D single-channel image
time1 = time.time()
for i in range(row):
    for j in range(col):
        tmp = im[i, j].tolist()
        if tmp == [0, 0, 255]:
            sketch[i, j] = 2
            continue
        if tmp == [0, 255, 0]:
            sketch[i, j] = 1
            continue
        if tmp == [85, 107, 47]:
            sketch[i, j] = 1
            continue
        if tmp == [210, 105, 30]:
Пример #51
0
def get_batch_data():
    global train_files, files, s1
    i = 0
    j = 0
    i = 0
    k = 0
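    # Infinite batch generator: reshuffles train_files each pass and yields 64-tile
    # batches of three-band images with one-hot labels.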
    while True:
        random.shuffle(train_files)
        X = np.array([]).reshape((0, resizeDim, resizeDim, nchannels))
        Y = np.zeros((batch_size, numclasses))

        for ind in train_files:
            tif = TIFF.open(files[ind], mode='r')
            image = tif.read_image()
            dataAll = np.array(image)
            if (dataAll.shape[0] > resizeDim or dataAll.shape[1] > resizeDim):
                continue

            village_code = int((files[ind].split('@')[3]).split('.')[0])
            val = 0
            try:
                try:
                    val = int(s1.loc[village_code]) - 1
                except:
                    continue
            except:
                continue
            data = np.delete(dataAll, [11, 12], axis=2)

            band2 = data[:, :, 1]
            band3 = data[:, :, 2]
            band4 = data[:, :, 3]
            band5 = data[:, :, 4]
            band6 = data[:, :, 5]
            band7 = data[:, :, 6]
            sum45 = band4 + band5
            sum35 = band3 + band5
            sum56 = band5 + band6
            sum57 = band5 + band7
            ####ndvi

            combinedData = np.dstack((band2, band3, band4))

            left = (resizeDim - combinedData.shape[0]) // 2
            right = resizeDim - combinedData.shape[0] - left
            up = (resizeDim - combinedData.shape[1]) // 2
            down = resizeDim - combinedData.shape[1] - up

            data1 = np.lib.pad(combinedData, [(left, right), (up, down),
                                              (0, 0)], 'constant')
            data1 = np.reshape(data1, (1, resizeDim, resizeDim, nchannels))
            if np.isnan(data1).any():
                continue
            else:
                X = np.vstack((X, data1))
                Y[i % batch_size, val] = 1

            i += 1

            if i % (64) == 0:
                X = np.asarray(X, dtype=np.float32)
                Y = np.asarray(Y, dtype=np.int32)
                dataset = (X, Y)
                yield X, Y
                break
Пример #52
0
import base64
from PIL import Image, ImageSequence
import numpy as np
from libtiff import TIFF

#--------------------------------------------------------#
# LibTiff: save each page of the multi-page TIFF as a PNG
#--------------------------------------------------------#
file = TIFF.open('18340.bin', 'r')
for i, image in enumerate(file.iter_images()):
    Image.fromarray(image).save("png/page%d.png" % i)

#--------------------------------------------------------#
# Working with Base64 and PIL
#--------------------------------------------------------#

#with open('18340.bin', mode='rb') as file:
#    str =base64.b64encode(file.read())

#imgdata = base64.b64decode(str)
#filename = 'some.tif'

#with open(filename, 'wb') as f:
#  f.write(imgdata)

#from numpy import save

#im = Image.open("some.tiff")
#for i, page in enumerate(ImageSequence.Iterator(im)):
#page.save("png/page%d.png" % i)
#region = (0, 0, 640, 980)
Пример #53
0
def create_data(src):
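    # Pair each image in `src` with its *_mask.tif, stack the pages of every multi-page
    # TIFF along the depth axis, normalise the images to zero mean / unit std, scale the
    # masks to [0, 1], append a channel axis and save both arrays as .npy files.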
    images = os.listdir(src)
    total = int(len(images) / 2)
    print(total)

    imgs = np.ndarray((total, image_x, image_y, image_z), dtype=np.uint8)
    imgs_mask = np.ndarray((total, image_x, image_y, image_z), dtype=np.uint8)

    print('Creating training images...')
    i = 0
    for image_name in images:
        if 'mask' in image_name:
            continue
        image_mask_name = image_name.split('.')[0] + "_mask.tif"

        tif = TIFF.open(src + '/' + image_name, mode='r')
        tif_mask = TIFF.open(src + '/' + image_mask_name, mode='r')

        count = 0
        imageAll = np.array([0])
        for image in tif.iter_images():
            if count == 0:
                imageAll = image
            else:
                imageAll = np.dstack((imageAll, image))
            count = count + 1

        count_mask = 0
        imageAll_mask = np.array([0])
        for image_mask in tif_mask.iter_images():
            if count_mask == 0:
                imageAll_mask = image_mask
            else:
                imageAll_mask = np.dstack((imageAll_mask, image_mask))
            count_mask = count_mask + 1

        imgs[i] = imageAll
        imgs_mask[i] = imageAll_mask
        i = i + 1

    print(imgs.shape)
    print(imgs_mask.shape)

    imgs = imgs.astype('float32')
    imgs_mask = imgs_mask.astype('float32')

    mean = np.mean(imgs)
    std = np.std(imgs)
    print('mean = ', mean)
    print('std = ', std)

    imgs -= mean
    imgs /= std

    imgs_mask /= 255.

    # add one dimension, corresponding to the input channel
    imgs = imgs[..., np.newaxis]
    imgs_mask = imgs_mask[..., np.newaxis]

    np.save('images_train.npy', imgs)
    np.save('images_mask_train.npy', imgs_mask)

    print('Creating data done!')
Пример #54
0
 def tiff_iterator(paths_tiff):
     for tf in paths_tiff:
         tif = TIFF.open(tf)
         for image in tif.iter_images():
             yield image
         tif.close()
Пример #55
0
        kernel = np.zeros(
            (in_channels, out_channels, kernel_size, kernel_size, kernel_size), dtype=np.float32
        )
        kernel[range(in_channels), range(out_channels), :, :, :] = bil_filt

        self.upconv = nn.ConvTranspose3d(in_channels, out_channels, kernel_size,
                                         stride=self.up_scale, padding=self.padding)

        self.upconv.weight.data.copy_(torch.from_numpy(kernel))
        self.upconv.bias.data.fill_(0)
        self.upconv.weight.requires_grad = False
        self.upconv.bias.requires_grad = False

    def forward(self, x):
        return self.upconv(x)

if __name__ == '__main__':
    BI = BilinearInterpolation3d(1,1,2)
    # tmp_x = np.array([[[1, 2, 3], [4, 5, 6], [7,8,9]], [[1,1,1],[2,2,2],[3,3,3]]], dtype='float32')
    tmp_x = io.imread('/media/dongmeng/Hulk/dataset/total_d8/image/0006/0006.tif').astype('float32')
    tmp_x = tmp_x[np.newaxis, np.newaxis, :, :, :]
    in_x = torch.tensor(tmp_x)
    out_x = BI.forward(in_x)
    out_x = out_x.numpy()
    image3D = TIFF.open(os.path.join('/media/dongmeng/Hulk/dataset/total_d8/image/0006/1', 'up.tif'), mode='w')
    out_x = np.squeeze(out_x)
    for k in range(out_x.shape[0]):
        image3D.write_image(out_x[k, :].astype('uint16'), compression='lzw', write_rgb=False)
    image3D.close()
    #print(out_x)
Пример #56
0
def download_image_data(image_ids_or_dataset_id,
                        dataset=False,
                        channel=None,
                        z_stack=0,
                        frame=0,
                        coord=(0, 0),
                        width=0,
                        height=0,
                        region_spec='rectangle',
                        skip_failed=False,
                        download_tar=False,
                        omero_host='idr.openmicroscopy.org',
                        omero_secured=False,
                        config_file=None):
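    """Download the selected plane/region of one or more OMERO images (or of every
    image in a dataset) and save each as a TIFF, optionally bundled into images.tar."""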

    if config_file is None:  # IDR connection
        omero_username = '******'
        omero_password = '******'
    else:  # other omero instance
        with open(config_file) as f:
            cfg = json.load(f)
            omero_username = cfg['username']
            omero_password = cfg['password']

            if omero_username == "" or omero_password == "":
                omero_username = '******'
                omero_password = '******'

    if region_spec not in ['rectangle', 'center']:
        raise ValueError(
            'Got unknown value "{0}" as region_spec argument'.format(
                region_spec))
    with ExitStack() as exit_stack:
        conn = exit_stack.enter_context(
            BlitzGateway(omero_username,
                         omero_password,
                         host=omero_host,
                         secure=omero_secured))
        # exit_stack.callback(conn.connect().close)
        if download_tar:
            # create an archive file to write images to
            archive = exit_stack.enter_context(
                tarfile.open('images.tar', mode='w'))
            tempdir = exit_stack.enter_context(TemporaryDirectory())

        if dataset:
            dataset_warning_id = 'Dataset-ID: {0}'.format(
                image_ids_or_dataset_id[0])
            try:
                dataset_id = int(image_ids_or_dataset_id[0])
            except ValueError:
                image_ids = None
            else:
                try:
                    dataset = conn.getObject("Dataset", dataset_id)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), dataset_warning_id, warn_skip=True)
                    else:
                        raise
                else:
                    image_ids = [image.id for image in dataset.listChildren()]

            if image_ids is None:
                if skip_failed:
                    warn(
                        'Unable to find a dataset with this ID in the '
                        'database.',
                        dataset_warning_id,
                        warn_skip=True)
                else:
                    raise ValueError(
                        '{0}: Unable to find a dataset with this ID in the '
                        'database. Aborting!'.format(dataset_warning_id))

        else:
            # basic argument sanity checks and adjustments
            prefix = 'image-'
            # normalize image ids by stripping off prefix if it exists
            image_ids = [
                iid[len(prefix):] if iid[:len(prefix)] == prefix else iid
                for iid in image_ids_or_dataset_id
            ]
        for image_id in image_ids:
            image_warning_id = 'Image-ID: {0}'.format(image_id)
            try:
                image_id = int(image_id)
            except ValueError:
                image = None
            else:
                try:
                    image = conn.getObject("Image", image_id)
                except Exception as e:
                    # respect skip_failed on unexpected errors
                    if skip_failed:
                        warn(str(e), image_warning_id, warn_skip=True)
                        continue
                    else:
                        raise

            if image is None:
                if skip_failed:
                    warn(
                        'Unable to find an image with this ID in the '
                        'database.',
                        image_warning_id,
                        warn_skip=True)
                    continue
                raise ValueError(
                    '{0}: Unable to find an image with this ID in the '
                    'database. Aborting!'.format(image_warning_id))

            try:
                # try to extract image properties
                # if anything goes wrong here skip the image
                # or abort.
                image_name = os.path.splitext(image.getName())[0]
                image_warning_id = '{0} (ID: {1})'.format(image_name, image_id)

                if region_spec == 'rectangle':
                    tile = get_clipping_region(image, *coord, width, height)
                elif region_spec == 'center':
                    tile = get_clipping_region(
                        image, *_center_to_ul(*coord, width, height))

                ori_z, z_stack = z_stack, confine_plane(image, z_stack)
                ori_frame, frame = frame, confine_frame(image, frame)
                num_channels = image.getSizeC()
                if channel is None:
                    channel_index = 0
                else:
                    channel_index = find_channel_index(image, channel)
            except Exception as e:
                # respect skip_failed on unexpected errors
                if skip_failed:
                    warn(str(e), image_warning_id, warn_skip=True)
                    continue
                else:
                    raise

            # region sanity checks and warnings
            if tile[2] < width or tile[3] < height:
                # The downloaded image region will have smaller dimensions
                # than the specified width x height.
                warn(
                    'Downloaded image dimensions ({0} x {1}) will be smaller '
                    'than the specified width and height ({2} x {3}).'.format(
                        tile[2], tile[3], width, height), image_warning_id)

            # z-stack sanity checks and warnings
            if z_stack != ori_z:
                warn(
                    'Specified image plane ({0}) is out of bounds. Using {1} '
                    'instead.'.format(ori_z, z_stack), image_warning_id)

            # frame sanity checks and warnings
            if frame != ori_frame:
                warn(
                    'Specified image frame ({0}) is out of bounds. Using '
                    'frame {1} instead.'.format(ori_frame, frame),
                    image_warning_id)

            # channel index sanity checks and warnings
            if channel is None:
                if num_channels > 1:
                    warn(
                        'No specific channel selected for multi-channel '
                        'image. Using first of {0} channels.'.format(
                            num_channels), image_warning_id)
            else:
                if channel_index == -1 or channel_index >= num_channels:
                    if skip_failed:
                        warn(str(channel) +
                             ' is not a known channel name for this image.',
                             image_warning_id,
                             warn_skip=True)
                        continue
                    else:
                        raise ValueError(
                            '"{0}" is not a known channel name for image {1}. '
                            'Aborting!'.format(channel, image_warning_id))

            # download and save the region as TIFF
            fname = '__'.join([image_name, str(image_id)] +
                              [str(x) for x in tile])
            try:
                if fname[-5:] != '.tiff':
                    fname += '.tiff'

                fname = fname.replace(' ', '_')

                im_array = get_image_array(image, tile, z_stack, channel_index,
                                           frame)

                if download_tar:
                    fname = os.path.join(tempdir, fname)
                try:
                    tiff = TIFF.open(fname, mode='w')
                    tiff.write_image(im_array)
                finally:
                    tiff.close()
                # move image into tarball
                if download_tar:
                    archive.add(fname, os.path.basename(fname))
                    os.remove(fname)
            except Exception as e:
                if skip_failed:
                    # respect skip_failed on unexpected errors
                    warn(str(e), image_warning_id, warn_skip=True)
                    continue
                else:
                    raise
Пример #57
0
counter = 0  # total number of frames captured across all runs
c = 0
n_samples = 10
img_counter = 0
runs = 0
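# Each run captures n_samples frames: the RGB image is saved as a PNG and the raw
# depth array is written to a TIFF.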
for n in range(10):
    print("Run:", runs)
    runs += 1
    print("press Enter")
    i = input()
    try:
        c = 0
        while c < n_samples:
            counter += 1
            c += 1
            img = b.recv_msg("realsense_images", -1)

            rgb = np.reshape(img.get_rgb(), img.get_shape_rgb())
            rgb = Image.fromarray(rgb)
            rgb.save("./images_franka/rgb/" + str(img_counter) + ".png")

            depth = np.reshape(img.get_depth(), img.get_shape_depth())
            tiff = TIFF.open("./images_franka/depth/" + str(img_counter) +
                             ".tiff",
                             mode="w")
            tiff.write_image(depth)
            tiff.close()
            img_counter += 1
            print(counter)
    except KeyboardInterrupt:
        break
Пример #58
0
def infer_little_img(input_image_path,
                     patch_size=224,
                     stride_ver=112,
                     stride_hor=112):
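    # Sliding-window inference: concatenate the image with its nDSM and DSM channels,
    # scan it with overlapping patch_size windows, accumulate the per-patch logits in
    # output_map, then colour-code the argmax class of every pixel.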
    tf.reset_default_graph()
    input_image = TIFF.open(input_image_path, 'r')
    input_image = input_image.read_image()

    # need to be fixed
    element = input_image_path.split('_')
    if len(element[7]) == 1:
        element[7] = '0' + element[7]
    if len(element[8]) == 1:
        element[8] = '0' + element[8]
    print('ISPRS_semantic_labeling_Potsdam/1_DSM/dsm_potsdam_' + element[7] +
          "_" + element[8] + ".tif")
    #dsm_image= imread('ISPRS_semantic_labeling_Potsdam/1_DSM/dsm_potsdam_'+element[7]+"_"+element[8]+".tif")

    dsm_image = TIFF.open(
        'ISPRS_semantic_labeling_Potsdam/1_DSM/dsm_potsdam_' + element[7] +
        "_" + element[8] + ".tif", 'r')
    dsm_image = dsm_image.read_image()
    dsm_image = np.expand_dims(dsm_image, axis=2)
    ndsm_image = imread(
        'ISPRS_semantic_labeling_Potsdam/1_DSM_normalisation/dsm_potsdam_' +
        element[7] + "_" + element[8] + "_normalized_lastools.jpg")
    ndsm_image = np.expand_dims(ndsm_image, axis=2)

    height = np.shape(input_image)[0]
    width = np.shape(input_image)[1]
    output_image = np.zeros(shape=(height, width, 3))
    print(np.shape(input_image))
    print(np.shape(ndsm_image))
    print(np.shape(dsm_image))
    input_image = np.concatenate((input_image, ndsm_image, dsm_image), axis=2)
    output_map = np.zeros((height, width, 6), dtype=np.float32)
    number_of_vertical_points = (height - patch_size) // stride_ver + 1
    number_of_horizontial_points = (width - patch_size) // stride_hor + 1
    sess = tf.Session()
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32,
                           shape=[None, IMAGE_SIZE, IMAGE_SIZE, 6],
                           name="input_image")
    _, logits = inference(image, keep_probability)
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    input_image = np.expand_dims(input_image, axis=0)
    for i in range(number_of_vertical_points):
        for j in range(number_of_horizontial_points):
            current_patch = input_image[:, i * stride_ver:i * stride_ver +
                                        patch_size,
                                        j * stride_hor:j * stride_hor +
                                        patch_size, :]
            logits_result = sess.run(logits,
                                     feed_dict={
                                         image: current_patch,
                                         keep_probability: 1.0
                                     })
            logits_result = tf.squeeze(logits_result)
            patch_result = sess.run(logits_result)
            output_map[i * stride_ver:i * stride_ver + patch_size,
                       j * stride_hor:j * stride_hor +
                       patch_size, :] += patch_result
            print('stage 1: i=' + str(i) + "; j=" + str(j))
    for i in range(number_of_vertical_points):
        current_patch = input_image[:,
                                    i * stride_ver:i * stride_ver + patch_size,
                                    width - patch_size:width, :]
        logits_result = sess.run(logits,
                                 feed_dict={
                                     image: current_patch,
                                     keep_probability: 1.0
                                 })
        logits_result = tf.squeeze(logits_result)
        patch_result = sess.run(logits_result)
        output_map[i * stride_ver:i * stride_ver + patch_size,
                   width - patch_size:width, :] += patch_result
        print('stage 2: i=' + str(i) + "; j=" + str(j))
    for i in range(number_of_horizontial_points):
        current_patch = input_image[:, height - patch_size:height, i *
                                    stride_hor:i * stride_hor + patch_size, :]
        logits_result = sess.run(logits,
                                 feed_dict={
                                     image: current_patch,
                                     keep_probability: 1.0
                                 })
        logits_result = tf.squeeze(logits_result)
        patch_result = sess.run(logits_result)
        output_map[height - patch_size:height, i * stride_hor:i * stride_hor +
                   patch_size, :] += patch_result
        print('stage 3: i=' + str(i) + "; j=" + str(j))
    current_patch = input_image[:, height - patch_size:height,
                                width - patch_size:width, :]
    logits_result = sess.run(logits,
                             feed_dict={
                                 image: current_patch,
                                 keep_probability: 1.0
                             })
    logits_result = tf.squeeze(logits_result)
    patch_result = sess.run(logits_result)
    output_map[height - patch_size:height,
               width - patch_size:width, :] += patch_result
    predict_annotation_image = np.argmax(output_map, axis=2)
    print(np.shape(predict_annotation_image))
    for i in range(height):
        for j in range(width):
            if predict_annotation_image[i, j] == 0:
                output_image[i, j, :] = [255, 255, 255]
            elif predict_annotation_image[i, j] == 1:
                output_image[i, j, :] = [0, 0, 255]
            elif predict_annotation_image[i, j] == 2:
                output_image[i, j, :] = [0, 255, 255]
            elif predict_annotation_image[i, j] == 3:
                output_image[i, j, :] = [0, 255, 0]
            elif predict_annotation_image[i, j] == 4:
                output_image[i, j, :] = [255, 255, 0]
            elif predict_annotation_image[i, j] == 5:
                output_image[i, j, :] = [255, 0, 0]
    return output_image
Пример #59
0
def write_tif(filename, array):
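    """Write `array` to `filename` as a single-image TIFF via libtiff."""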
    tiff = TIFF.open(str(filename), mode='w')
    tiff.write_image(array)
    tiff.close()
    for i in range(n_h - 1):
        for j in range(n_w - 1):
            crop_x = a[(i * stride):((i * stride) + crop_size),
                       (j * stride):((j * stride) + crop_size), :]
            croped_images.append(crop_x)
    return croped_images


# Build an array of the training satellite images as-is, without any cropping

xtrain_list = []

for fname in filelist_trainx[:1]:

    # Reading the image
    tif = TIFF.open(fname)
    image = tif.read_image()

    crop_size = 128

    stride = 32

    h, w, c = image.shape

    n_h = int(h / stride)
    n_w = int(w / stride)

    image = padding(image, w, h, c, crop_size, stride, n_h, n_w)

    xtrain_list.append(image)