Code Example #1
File: wrapper.py Project: martinResearch/PyIPOL
def extract_surf(image):  # todo: change the name of the function

   """*********************************************************************

   """  # todo: copy the documentation from the C++ file
  
 
   # save input images 
   tmp_folder=tempfile.mkdtemp()
   imwrite(os.path.join(tmp_folder,'image.png'),image)
  

   command='cd %s'%tmp_folder+';'  # move into the temporary folder
   command+=bin_directory+'/extract_surf image.png surf_point.txt'
   

   # calling the executable
   os.system(command)

   # read the outputs from the temporary files
   # it can help to open the temporary folder in your file explorer
   #imgOutVert=imread(os.path.join(tmp_folder,'imgOutVert.png'))
   #imgOutHori=imread(os.path.join(tmp_folder,'imgOutHori.png'))

   with open(os.path.join(tmp_folder,'surf_point.txt'),'r') as file:
      file.readline()  # skip the two header lines
      file.readline()
      key_points=np.loadtxt(file)


            
   # delete the temporary files
   shutil.rmtree(tmp_folder)

   return key_points
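
A minimal usage sketch for the wrapper above (the input file name is hypothetical; the wrapper module is assumed to already define bin_directory, imwrite, np, tempfile, os and shutil at module level):

import imageio

image = imageio.imread('photo.png')   # hypothetical input file
key_points = extract_surf(image)      # one SURF keypoint per row
print(key_points.shape)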
Code Example #2
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (HEIGHT, WIDTH))

    kps = []
    for y in range(NB_ROWS):
        ycoord = BB_Y1 + int(y * (BB_Y2 - BB_Y1) / (NB_ROWS - 1))
        for x in range(NB_COLS):
            xcoord = BB_X1 + int(x * (BB_X2 - BB_X1) / (NB_COLS - 1))
            kp = (xcoord, ycoord)
            kps.append(kp)
    kps = set(kps)
    kps = [ia.Keypoint(x=xcoord, y=ycoord) for (xcoord, ycoord) in kps]
    kps = ia.KeypointsOnImage(kps, shape=image.shape)

    bb = ia.BoundingBox(x1=BB_X1, x2=BB_X2, y1=BB_Y1, y2=BB_Y2)
    bbs = ia.BoundingBoxesOnImage([bb], shape=image.shape)

    seq = iaa.Affine(rotate=45)
    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_image(image)
    kps_aug = seq_det.augment_keypoints([kps])[0]
    bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

    image_before = np.copy(image)
    image_before = kps.draw_on_image(image_before)
    image_before = bbs.draw_on_image(image_before)

    image_after = np.copy(image_aug)
    image_after = kps_aug.draw_on_image(image_after)
    image_after = bbs_aug.draw_on_image(image_after)

    ia.imshow(np.hstack([image_before, image_after]))
    imageio.imwrite("bb_aug.jpg", np.hstack([image_before, image_after]))
Code Example #3
File: generate.py Project: dj-shin/ctf2018-writeups
def generate(flag):
    data = gen_qr(flag, version=7, box_size=1)
    patch_size = 5

    patch_list = list()
    for i in range(0, data.shape[0], patch_size):
        for j in range(0, data.shape[1], patch_size):
            patch_list.append((i, j))

    # setup
    x, y = patch_list[0]
    patch_data = str(int(''.join([str(c) for c in list(data[x:x+patch_size, y:y+patch_size].flatten())]), 2))
    for v in range(1, 41):
        try:
            patch_qr = gen_qr(patch_data, version=v)
        except qrcode.exceptions.DataOverflowError:
            continue
        else:
            factor = patch_qr.shape[0] // patch_size
            padding = patch_qr.shape[0] - factor * patch_size
            if padding > 0:
                continue
            else:
                break

    # emplace
    large_data = np.repeat(np.repeat(data, factor, axis=0), factor, axis=1)
    for x, y in patch_list:
        patch_data = str(int(''.join([str(c) for c in list(data[x:x+patch_size, y:y+patch_size].flatten())]), 2))
        patch_qr = np.rot90(gen_qr(patch_data, version=v), k=random.randint(0, 3))
        large_data[x * factor : x * factor + patch_qr.shape[0], y * factor : y * factor + patch_qr.shape[1]] = patch_qr

    print(large_data.shape)
    imageio.imwrite('code.png', np.repeat(np.repeat(large_data, 4, axis=0), 4, axis=1) * 255)
Code Example #4
File: scratch.py Project: BooDoo/animecommentbot
def make_comment(count=1, out_path="output", vid_file=None):
    vid_file = vid_file or get_random_mkv()
    log(u"Using {} as source...".format(os.path.basename(vid_file)))
    label = os.path.basename(vid_file).split(".")[0].replace(" ","").lower()
    vid_clip = VideoFileClip(vid_file)
    earliest = int(vid_clip.duration * 0.1)
    latest = int(vid_clip.duration * 0.9)
    valid_range = range(earliest, latest+1)

    sub_opts = make_sub_opts(vid_clip)
    with open("queue.txt", "a") as queue:
        real_lines = process_memorable_lines(20, 110)
        for n in range(1, count+1):
            ### this "None" will get a random SRT from corpora, by default.
            ### Can override with specific source file
            ### txt_line = get_nyer_caption(20, 110)
            ### txt_line = get_tweetable_line("corpora/tentacle-rough.txt", min_length=30, max_length=90)
            txt_line = choice(real_lines)
            debug(u"Using {} as subtitle...".format(txt_line.encode('utf8', 'ignore')))
            txt_clip = sub_generator(txt_line, **sub_opts)

            composed = CompositeVideoClip([vid_clip, txt_clip.set_pos("top")])
            frame = composed.get_frame(choice(valid_range))
            log(u"\tWriting {0} of {1:03d}...".format(n, count) )
            image_path = u"{0}/{1}_{2:03d}.png".format(out_path, label, n)
            imwrite(image_path, frame)
            queue.write(u"{0}{1}{2}\n".format(image_path, queue_separator, txt_line).encode('utf8', 'replace') )
Code Example #5
File: wrapper.py Project: martinResearch/PyIPOL
def nlmeans(image,sigma,noise_free=None):
   """
   `nlmeans_ipol ` takes 4 parameter: `nlmeans_ipol in.png sigma noisy.png denoised.png`
   * `sigma`     : the noise standard deviation
   * `in.png`   : initial noise free image
   * `noisy.png`  : noisy image used by the denoising algorithm
   * `denoised.png` : denoised image
   """

   #saving input image to a temporary file
   output_file=tempfile.mkstemp('.PNG')[1]
   
   temp_image_file =tempfile.mkstemp('.PNG')[1]
   imwrite(temp_image_file,image)
   
   if noise_free is not None:
      temp_noise_free_image_file =tempfile.mkstemp('.PNG')[1]
      imwrite(temp_noise_free_image_file,noise_free)
   else:
      temp_noise_free_image_file=temp_image_file
   
   command=exec_folder+'/nlmeans_ipol %s %f %s %s'%(temp_noise_free_image_file,sigma,temp_image_file,output_file)
      
   # calling the executable
   os.system(command)
   #reading the output from the temporary file
   output=imread(output_file)  
   os.remove(output_file)
   if noise_free is not None:
      os.remove(temp_noise_free_image_file)
   os.remove(temp_image_file)
   return output
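
A sketch of calling nlmeans as documented above (file names are hypothetical; the noise-free reference is optional):

import imageio

noisy = imageio.imread('noisy.png')                       # hypothetical input
denoised = nlmeans(noisy, sigma=20)                       # denoise without a reference
# denoised = nlmeans(noisy, sigma=20, noise_free=clean)   # or supply the clean image
imageio.imwrite('denoised.png', denoised)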
Code Example #6
File: plot.py Project: chr5tphr/ecGAN
def save_colorized_image(data, fpath, center=None, cmap='hot', batchnorm=False, fullcmap=False, what='explanation', outshape=[5, 6]):
    if isinstance(data, nd.NDArray):
        data = asnumpy(data)
    N, C, H, W = data.shape
    data = data.transpose([0, 2, 3, 1])

    if outshape is None:
        outshape = [int(N**0.5)]*2
    crop = np.prod(outshape)
    oH, oW = outshape
    data = data[:crop].mean(axis=3)
    if batchnorm:
        lo, hi = data.min(), data.max()
    else:
        lo = data.min(axis=1, keepdims=True).min(axis=2, keepdims=True)
        hi = data.max(axis=1, keepdims=True).max(axis=2, keepdims=True)
    if not fullcmap:
        hi = np.maximum(np.abs(lo), np.abs(hi))
        lo = -hi
    #getLogger('ecGAN').debug('%s min %f, max %f', what, lo.min(), hi.max())
    data = draw_heatmap(data, lo, hi, center=center, cmap=cmap)
    #getLogger('ecGAN').debug('data min %s, max %s', str(data.min()), str(data.max()))
    data = (data * 255).clip(0, 255).astype(np.uint8)

    data = align_images(data, oH, oW, H, W, 3)
    imwrite(fpath, data)
    getLogger('ecGAN').info('Saved %s in \'%s\'.', what, fpath)
Code Example #7
File: wrapper.py Project: martinResearch/PyIPOL
def imnoise(image,model,sigma):
   """
   Syntax: imnoise <model>:<sigma> <input> <output>

The program reads the image <input> and simulates noise to create <output>.
The <model>:<sigma> argument has the same meaning as in tvdenoise.

   """

   #saving input image to a temporary file
   output_file=tempfile.mkstemp('.PNG')[1]
   
   temp_image_file =tempfile.mkstemp('.PNG')[1]
   imwrite(temp_image_file,image)
   assert(model in ['gaussian','laplace','poisson'])
  
   
   command=exec_folder+'/imnoise %s:%f %s %s'%(model,sigma,temp_image_file,output_file)
      
   # calling the executable
   os.system(command)
   #reading the output from the temporary file
   output=imread(output_file)   
   os.remove(output_file)
   os.remove(temp_image_file)   
   return output
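
The imnoise and nlmeans wrappers compose naturally; a sketch, assuming a clean test image is available (file name hypothetical):

clean = imread('input.png')
noisy = imnoise(clean, model='gaussian', sigma=10)       # simulate noise
restored = nlmeans(noisy, sigma=10, noise_free=clean)    # denoise against the clean reference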
Code Example #8
File: __init__.py Project: paulhoule/tentacruel
    def _make_still(self, pattern):
        if "still" not in pattern:
            return

        last_shot = self._lookup_matching(pattern)[-1]
        overlays = self._load_overlays(pattern)
        content = self._compose_frame(last_shot, overlays)
        imageio.imwrite(self._output / pattern["still"], content, "PNG-FI")
Code Example #9
File: wrapper.py Project: martinResearch/PyIPOL
def imblur(image,kernel,radius=None,sigma_kernel=None,noise='gaussian',sigma=2,jpegquality=100):
   """
   The imblur program blurs and adds noise to an image.
   
   Parameters
     K:<kernel>             blur kernel for deconvolution
         K:disk:<radius>         filled disk kernel
         K:gaussian:<sigma>      Gaussian kernel
         K:<file>                read kernel from text or image file
     noise:<model>:<sigma>  simulate noise with standard deviation sigma
         noise:gaussian:<sigma>  additive white Gaussian noise
         noise:laplace:<sigma>   Laplace noise
         noise:poisson:<sigma>   Poisson noise
     f:<file>               input file (alternative syntax)
     u:<file>               output file (alternative syntax)
     jpegquality:<number>   quality for saving JPEG images (0 to 100)
   """
   
   
   assert(noise in ['gaussian','laplace','poisson'])
   
   #saving input image to a temporary file
   output_file=tempfile.mkstemp('.PNG' )[1] 
   image_file =tempfile.mkstemp('.PNG')[1]
   imwrite(image_file,image)
 

   command=exec_folder+'/imblur %s %s'%(image_file,output_file)
   if kernel=='disk':
      command+=' K:disk:%s'%(radius)
      assert(sigma_kernel is None)
      
   elif kernel=='gaussian':
      command+=' K:gaussian:%s'%(sigma_kernel)
      assert(radius is None)
      
   elif isinstance(kernel,np.ndarray):
      pass  # array kernels: no K: option is appended
   
   
   
   command+=' noise:%s:%f'%(noise,sigma)
     
   # calling the executable
   #subprocess.call( command ,shell=True)
   p = Popen(command, stdout = PIPE, stderr = PIPE,bufsize=1,shell=True)
   for line in iter(p.stdout.readline, b''):  # stdout yields bytes, so the sentinel must be b''
      print (line)
   p.stdout.close()   
   p.wait()
   #reading the output from the temporary file
   
  
   output=imread(output_file)
   os.remove(output_file)
   os.remove(image_file)
   return output
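
A sketch exercising the two string kernel forms this wrapper handles (file name hypothetical):

image = imread('input.png')
blurred = imblur(image, kernel='disk', radius=3)                # filled-disk kernel
# blurred = imblur(image, kernel='gaussian', sigma_kernel=2.0)  # Gaussian kernel variant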
Code Example #10
File: wrapper.py Project: martinResearch/PyIPOL
def tvdeconv(image,kernel,radius=None,sigma_kernel=None,lamb=50,noise='gaussian',jpegquality=100):
   """
   Parameters
     K:<kernel>             blur kernel for deconvolution
         K:disk:<radius>         filled disk kernel
         K:gaussian:<sigma>      Gaussian kernel
         K:<file>                read kernel from text or image file
     lambda:<value>         fidelity weight
     noise:<model>          noisy model
         noise:gaussian          additive Gaussian noise (default)
         noise:laplace           Laplace noise
         noise:poisson           Poisson noise
     f:<file>               input file (alternative syntax)
     u:<file>               output file (alternative syntax)
     jpegquality:<number>   quality for saving JPEG images (0 to 100)
   """
   
   
   assert(noise in ['gaussian','laplace','poisson'])
   
   #saving input image to a temporary file
   output_file=tempfile.mkstemp('.PNG')[1]    
   image_file =tempfile.mkstemp('.PNG')[1]
   imwrite(image_file,image)
 

   command=exec_folder+'/tvdeconv %s %s'%(image_file,output_file)
   if kernel=='disk':
      command+=' K:disk:%s'%(radius)
      assert(sigma_kernel is None)
      
   elif kernel=='gaussian':
      command+=' K:gaussian:%s'%(sigma_kernel)
      assert(radius is None)
      
   elif isinstance(kernel,np.ndarray):
      pass  # array kernels: no K: option is appended
   
   
   command+=' lambda:%f'%lamb
   
   command+=' noise:%s'%noise
     
   # calling the executable
   #subprocess.call( command ,shell=True)
   p = Popen(command, stdout = PIPE, stderr = PIPE,bufsize=1,shell=True)
   for line in iter(p.stdout.readline, b''):  # stdout yields bytes, so the sentinel must be b''
      print (line)
   p.stdout.close()   
   p.wait()
   #reading the output from the temporary file
   
  
   output=imread(output_file)
   os.remove(output_file)
   os.remove(image_file)   
   return output
Code Example #11
File: SensorData.py Project: caskeep/3D-SIS
 def export_color_images(self, output_path, image_size=None, frame_skip=1):
   if not os.path.exists(output_path):
     os.makedirs(output_path)
   print('exporting', len(self.frames)//frame_skip, 'color frames to', output_path)
   for f in range(0, len(self.frames), frame_skip):
     color = self.frames[f].decompress_color(self.color_compression_type)
     if image_size is not None:
       color = cv2.resize(color, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)
     imageio.imwrite(os.path.join(output_path, str(f) + '.jpg'), color)
Code Example #12
File: clips.py Project: jackwalker64/reflect
 def _saveImage(self, filepath, **kwargs):
   import math
   for i in range(0, self.frameCount):
     uri = "{0}_{1:0>{width}}.png".format(
       os.path.splitext(filepath)[0],
       i,
       width = math.floor(math.log10(self.frameCount)) + 1
     )
     imageio.imwrite(uri, self.frame(i))
Code Example #13
File: test_core.py Project: imageio/imageio
def test_imwrite_not_array_like():
    class Foo(object):
        def __init__(self):
            pass

    with raises(ValueError):
        imageio.imwrite("foo.bmp", Foo())
    with raises(ValueError):
        imageio.imwrite("foo.bmp", "asd")
Code Example #14
File: SensorData.py Project: caskeep/3D-SIS
 def export_depth_images(self, output_path, image_size=None, frame_skip=1):
   if not os.path.exists(output_path):
     os.makedirs(output_path)
   print('exporting', len(self.frames)//frame_skip, 'depth frames to', output_path)
   for f in range(0, len(self.frames), frame_skip):
     depth_data = self.frames[f].decompress_depth(self.depth_compression_type)
      depth = np.frombuffer(depth_data, dtype=np.uint16).reshape(self.depth_height, self.depth_width)
     if image_size is not None:
       depth = cv2.resize(depth, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)
     imageio.imwrite(os.path.join(output_path, str(f) + '.png'), depth)
Code Example #15
File: wrapper.py Project: martinResearch/PyIPOL
def spatial( image1,image2, processors=1, alpha=18, gamma=1, nscales=100, zoom_factor=0.75, TOL=0.0001, inner_iter=1, outer_iter=15,verbose=False):
   """
   Compute the optical flow between two images.

   Usage: 
   inputs 

	image1, image2 : the two input images as numpy arrays
	processors  : number of processors to run the method
	alpha       : weight of the smoothing term
	gamma       : weight of the gradient constancy term
	nscales     : desired number of scales
	zoom_factor : downsampling factor 
	TOL         : stopping criterion threshold for the numerical scheme
	inner_iter  : number of inner iterations in the numerical scheme
	outer_iter  : number of outer iterations in the numerical scheme	
	verbose     : 0 or 1, for quiet or verbose behaviour
   returns
      	the optical flow as a numpy array of shape (H, W, 2)
   """   
   # todo : you can copy the documentation from the C++ file
   # it is preferred to clean the description so that it better
   # reflects the python binding interface
  
   # create a temporary folder where temporary files will be read and created by the executable
   tmp_folder=tempfile.mkdtemp()
   if not (isinstance(image1,np.ndarray) and isinstance(image2,np.ndarray)):
      raise TypeError('the input images should be numpy arrays')
   
   # save the input images

   imwrite(os.path.join(tmp_folder,'image1.png'),image1)
   imwrite(os.path.join(tmp_folder,'image2.png'),image2)

   command='cd %s'%tmp_folder+';'  # move into the temporary folder
   command+=os.path.join(binary_directory1,'main ')+' image1.png image2.png flow.uv %f %f %d %f %f %d %d %s %d'%(\
      alpha, gamma, nscales, zoom_factor, TOL, inner_iter, outer_iter,tmp_folder,verbose)
      
   # calling the executable
   os.system(command)

   # read the output from the temporary file
   output_file=os.path.join(tmp_folder,'flow.uv')
   with open(output_file,'r') as file:
      file.readline()
      file.readline()
      file.readline()  
      flow=np.fromfile(file,dtype=np.float32)[57:].reshape(2,image1.shape[0],image1.shape[1]).transpose((1,2,0))

   # delete the temporary files
   shutil.rmtree(tmp_folder)

   return flow
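
A sketch of calling the optical-flow wrapper on two consecutive frames with the documented defaults (file names hypothetical):

frame1 = imread('frame1.png')
frame2 = imread('frame2.png')
flow = spatial(frame1, frame2)   # H x W x 2 array of per-pixel displacements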
Code Example #16
File: wrapper.py Project: martinResearch/PyIPOL
def asift(image1,image2,resize_input=0):  # todo: change the name of the function

   """
        *******************************************************************************
	***************************  ASIFT image matching  **************************** 
	******************************************************************************* 
	Usage: " << argv[0] << " imgIn1.png imgIn2.png imgOutVert.png imgOutHori.png  
	matchings.txt keys1.txt keys2.txt [Resize option: 0/1] 
	- imgIn1.png, imgIn2.png: input images (in PNG format). 
	- imgOutVert.png, imgOutHori.png: output images (vertical/horizontal concatenated, 
	  in PNG format.) The detected matchings are connected by write lines.
	- matchings.txt: coordinates of matched points (col1, row1, col2, row2). 
	- keys1.txt keys2.txt: ASIFT keypoints of the two images.
	- [optional 0/1]. 1: input images resize to 800x600 (default). 0: no resize. 
   	******************************************************************************* 
	*********************  Jean-Michel Morel, Guoshen Yu, 2010 ******************** 
	******************************************************************************* 

   """# todo : copy the documentation from the C++ file
  
 
   # save input images 
   tmp_folder=tempfile.mkdtemp()
   imwrite(os.path.join(tmp_folder,'imgIn1.png'),image1)
   imwrite(os.path.join(tmp_folder,'imgIn2.png'),image2)

   command='cd %s'%tmp_folder+';'  # move into the temporary folder
   command+=source_directory+'/demo_ASIFT imgIn1.png imgIn2.png imgOutVert.png imgOutHori.png matchings.txt keys1.txt keys2.txt %d'%resize_input
   

   # calling the executable
   os.system(command)

   # read the outputs from the temporary files
   # it can help to open the temporary folder in your file explorer
   #imgOutVert=imread(os.path.join(tmp_folder,'imgOutVert.png'))
   #imgOutHori=imread(os.path.join(tmp_folder,'imgOutHori.png'))

   with open(os.path.join(tmp_folder,'matchings.txt'),'r') as file:
      nbmatches=int(file.readline())  # the first line contains the number of matches
      matchings=np.loadtxt(file)
      
   with open(os.path.join(tmp_folder,'keys1.txt'),'r') as file:
      file.readline()  # skip the first line, which contains the size of the matrix
      keys1=np.loadtxt(file)

   with open(os.path.join(tmp_folder,'keys2.txt'),'r') as file:
      file.readline()  # skip the first line, which contains the size of the matrix
      keys2=np.loadtxt(file)
            
   # delete the temporary files
   shutil.rmtree(tmp_folder)

   return matchings,keys1,keys2
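
A sketch of matching two images with the wrapper above (file names hypothetical; each row of matchings is col1, row1, col2, row2 as documented):

img1 = imread('scene_a.png')
img2 = imread('scene_b.png')
matchings, keys1, keys2 = asift(img1, img2, resize_input=1)
print('%d matches found' % matchings.shape[0])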
Code Example #17
File: wrapper.py Project: martinResearch/PyIPOL
def ace(image,alpha,omega,sigma=None,method='interp',levels=None,degree=None,jpeg_quality=100):
   """
   Usage: ace [options] input output
   
   where "input" and "output" are BMP files (JPEG, PNG, or TIFF files can also 
   be used if the program is compiled with libjpeg, libpng, and/or libtiff).  
   
   Options:
     -a <number>  alpha, stronger implies stronger enhancement
     -w <omega>   omega, spatial weighting function, choices are
                  1/r      default ACE, omega(x,y) = 1/sqrt(x^2+y^2)
                  1        constant, omega(x,y) = 1
                  G:#      Gaussian, where # specifies sigma,
                           omega(x,y) = exp(-(x^2+y^2)/(2 sigma^2))
     -m <method>  method to use for fast computation, choices are
                  interp:# interpolate s_a(L - I(x)) with # levels
                  poly:#   polynomial s_a with degree #
   
     -q <number>  quality for saving JPEG images (0 to 100)
   """

   #saving input image to a temporary file
   output_file=tempfile.mkstemp('.PNG')[1]
   
   temp_image_file =tempfile.mkstemp('.PNG')[1]
   imwrite(temp_image_file,image)


   if method=='interp':
      method_str='interp:%d'%levels
      assert(degree is None)
   elif method=='poly':
      method_str='poly:%d'%degree
      assert(levels is None)
   if omega=='G':
      omega_str='G:%f'%sigma  # the -w option expects the form G:<sigma>
   else:
      omega_str=omega
      assert(sigma is None)
   
   options='-a %f -w %s -m %s %s %s'%(alpha,omega_str,method_str,temp_image_file,output_file)
   if False: #using the executable
      command=exec_folder+'/ace %s'%options
      # calling the executable
      os.system(command)
   else:
      print(options.split(' '))
      _wrapper.main(options.split(' '))
   #reading the output from the temporary file
   output=imread(output_file)  
   os.remove(output_file)

   os.remove(temp_image_file)
   return output
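
A sketch following the option table above: the interp method needs levels, while the poly method needs degree (file names hypothetical):

image = imread('input.png')
enhanced = ace(image, alpha=5.0, omega='1/r', method='interp', levels=8)
imwrite('enhanced.png', enhanced)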
Code Example #18
File: wrapper.py Project: martinResearch/PyIPOL
def inpaint(image,mask,method='nlmeans',patch=9,iters=300,scales=7,coarse=0.3,conft=5,confa=0.1,lamb=0.05,init='poisson',psigma=10000):
   """
   options
       -method    method name (nlmeans)
       -patch     patch side (9)
       -iters     inpainting iterations (300)
       -scales    scales amount (7)
       -coarse    coarsest rate (0.3)
       -conft     confidence decay time (5)
       -confa     confidence asymptotic value (0.1)
       -lambda    lambda (0.05)
       -init      initialization type [poisson/black/avg/none] (poisson)
       -psigma    Gaussian patch weights (10000)
       -showpyr   PREFIX write intermediate pyramid results
       -shownnf   FILENAME write illustration of the final NNF
   """

   #saving input image to a temporary file
   output_file=tempfile.mkstemp('.PNG' )[1]  
   image_file =tempfile.mkstemp('.PNG')[1]
   imwrite(image_file,image)
   mask_file =tempfile.mkstemp('.PNG')[1]
   imwrite(mask_file,mask)

   command=exec_folder+'/build/Inpainting %s %s %s'%(image_file,mask_file,output_file)
   command+=' -method %s'%method
   command+=' -patch %d'%patch
   command+=' -iters %d'%iters
   command+=' -scales %d'%scales
   command+=' -coarse %f'%coarse
   command+=' -conft %f'%conft
   command+=' -confa %f'%confa
   command+=' -lambda %f'%lamb
   command+=' -init %s'%init
   command+=' -psigma %f'%psigma
     
   # calling the executable
   #subprocess.call( command ,shell=True)
   p = Popen(command, stdout = PIPE, stderr = PIPE,bufsize=1,shell=True)
   for line in iter(p.stdout.readline, b''):  # stdout yields bytes, so the sentinel must be b''
      print (line)
   p.stdout.close()   
   p.wait()
   #reading the output from the temporary file
   
   l=glob.glob(output_file+'_*')  # trick: the output file name is not exactly the one expected, a suffix gets appended
   assert(len(l)==1)
   output=imread(l[0])
   os.remove(l[0])
   os.remove(output_file)
   os.remove(image_file)
   os.remove(mask_file)
   return output
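
A sketch of inpainting with a mask; treating nonzero mask pixels as the region to fill is an assumption here, as are the file names:

image = imread('input.png')
mask = imread('mask.png')        # assumption: nonzero pixels mark the hole
result = inpaint(image, mask)    # defaults match the option list above
imwrite('inpainted.png', result)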
Code Example #19
File: test_core.py Project: imageio/imageio
def test_imwrite_not_subclass(tmpdir):
    class Foo(object):
        def __init__(self):
            pass

        def __array__(self, dtype=None):
            return np.zeros((4, 4), dtype=dtype)

    filename = os.path.join(str(tmpdir), "foo.bmp")
    imageio.imwrite(filename, Foo())
    im = imageio.imread(filename)
    assert im.shape == (4, 4)
Code Example #20
def _test():
    while True:
        try:
            if not BufFrameQ.empty():
                frm = BufFrameQ.get()
                ts = TStampQ.get()
                filename = datetime.fromtimestamp(ts).strftime('%m-%d_%H:%M:%S.%f') + '.jpg'
                imageio.imwrite(filename, frm)
                # print out log
                print('Saved image', filename)
        except KeyboardInterrupt:
            break
Code Example #21
File: test_tifffile.py Project: ghisvail/imageio
def test_tifffile_reading_writing():
    """ Test reading and saving tiff """
    
    need_internet()  # We keep a test image in the imageio-binary repo
    
    im2 = np.ones((10, 10, 3), np.uint8) * 2

    filename1 = os.path.join(test_dir, 'test_tiff.tiff')

    # One image
    imageio.imsave(filename1, im2)
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 1

    # Multiple images
    imageio.mimsave(filename1, [im2, im2, im2])
    im = imageio.imread(filename1)
    ims = imageio.mimread(filename1)
    assert (im == im2).all()
    assert len(ims) == 3, ims[0].shape

    # remote multipage rgb file
    filename2 = get_remote_file('images/multipage_rgb.tif')
    img = imageio.mimread(filename2)
    assert len(img) == 2
    assert img[0].shape == (3, 10, 10)

    # Mixed
    W = imageio.save(filename1)
    W.set_meta_data({'planarconfig': 'planar'})
    assert W.format.name == 'TIFF'
    W.append_data(im2)
    W.append_data(im2)
    W.close()
    #
    R = imageio.read(filename1)
    assert R.format.name == 'TIFF'
    ims = list(R)  # == [im for im in R]
    assert (ims[0] == im2).all()
    meta = R.get_meta_data()
    assert meta['orientation'] == 'top_left'
    # Fail
    raises(IndexError, R.get_data, -1)
    raises(IndexError, R.get_data, 3)

    # Ensure imwrite write works round trip
    filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
    R = imageio.imread(filename1)
    imageio.imwrite(filename3, R)
    R2 = imageio.imread(filename3)
    assert (R == R2).all()
Code Example #22
File: mwindow.py Project: Entscheider/SeamEater
 def __onSaveImage(self):
     filename = gui.QFileDialog.getSaveFileName()
     if (qtver == 5):
         filename = filename[0]
     else:
         filename = str(filename)
     if (len(filename) == 0):
         return
     #self.imgwdg.getPixmap().save(filename) # Use Qt for saving images.
     try:
          imwrite(filename, self.imgwdg.getNPArray()) # Use imageio for saving images.
     except Exception as e:
          gui.QMessageBox.critical(self, "Exception", "An error was thrown while saving: {}".format(e))
Code Example #23
File: writeImg.py Project: simon-r/SerialPhotoMerge
    def write(self, imagearr, file_name=None):
        if file_name is not None:
            self.file_name = file_name
        elif self.file_name is not None:
            pass
        else:
            raise Exception("%s: undefined file name" %
                            sys._getframe().f_code.co_name)

        imagearr.un_normalize()

        imageio.imwrite(self.file_name, imagearr.get_uint_array(
            tdepth=self.out_color_depth), None)
Code Example #24
File: ConvNet.py Project: abiaozsh/MyCode
def saveImagesMono(images, size, path):
    img = images
    h, w = img.shape[1], img.shape[2]
    merge_img = np.zeros((h * size[0], w * size[1], 1),dtype="float32")
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]
        merge_img[j*h:j*h+h, i*w:i*w+w, :] = image
    
    # Max value == min value is ambiguous given the dtype, because the values are all 0
    merge_img = np.clip(merge_img, -0.5, 0.5)
    imageio.imwrite(path, merge_img)
    return
Code Example #25
File: gensuperpixel.py Project: rjw57/mlmask
def main():
    opts = docopt.docopt(__doc__)
    datadir = opts['<datadir>']
    outputdir = os.path.join(datadir, 'superpixel')

    if not os.path.isdir(outputdir):
        os.makedirs(outputdir)

    for input_fn in glob.glob(os.path.join(datadir, 'input', '*.JPG')):
        input_base = os.path.join(outputdir,
            os.path.splitext(os.path.basename(input_fn))[0])
        if os.path.isfile(input_base + '.npz'):
            print('Skipping since {} exists'.format(input_base))
            continue

        print('Input: ' + input_fn)
        input_im = imageio.imread(input_fn)

        print('Converting to LAB colorspace')
        lab_im = skimcolor.rgb2lab(input_im)

        labels = np.zeros(input_im.shape[:2])

        print('Segmenting (watershed)...')
        labels = skimseg.join_segmentations(labels, ws_segment(lab_im[..., 0]))

        print('Segmenting (slic)...')
        # Set number of segments so each segment is roughly seg_size*seg_size in area
        seg_size = 128
        n_segments = 1 + int(input_im.shape[0] * input_im.shape[1] /
            (seg_size*seg_size))
        labels = skimseg.join_segmentations(labels,
            skimseg.slic(lab_im, n_segments=n_segments, sigma=1,
                compactness=0.1, multichannel=True, convert2lab=False,
                slic_zero=True)
        )

        print('Enforcing connectivity')
        # Enforce connectivity. This is important otherwise superpixels may be
        # spread over image.
        labels = skimmeas.label(labels)

        print('Saving output...')

        # Write visualisation
        imageio.imwrite(input_base + '-visualisation.jpg',
            skimseg.mark_boundaries(input_im, labels))

        # Write output
        np.savez_compressed(input_base + '.npz', labels=labels)
Code Example #26
File: plot.py Project: chr5tphr/ecGAN
def save_aligned_image(data, fpath, bbox, what='input data', outshape=[5, 6]):
    if isinstance(data, nd.NDArray):
        data = asnumpy(data)
    N, C, H, W = data.shape
    data = data.transpose([0, 2, 3, 1])
    if outshape is None:
        outshape = [int(N**0.5)]*2
    crop = np.prod(outshape)
    data = data[:crop]
    oH, oW = outshape
    indat = ((data - bbox[0]) * 255/(bbox[1]-bbox[0])).clip(0, 255).astype(np.uint8)
    indat = align_images(indat, oH, oW, H, W, C)
    imwrite(fpath, indat)
    getLogger('ecGAN').info('Saved %s in \'%s\'.', what, fpath)
Code Example #27
File: ConvNet.py Project: abiaozsh/MyCode
def saveImages(images, size, path):
    img = images
    h, w = img.shape[1], img.shape[2]
    merge_img = np.zeros((h * size[0], w * size[1], 3),dtype="float32")
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]
        merge_img[j*h:j*h+h, i*w:i*w+w, :] = image
    
    merge_img = np.clip(merge_img, -0.5, 0.5)
    merge_img[0,0,0] = 0.5   # pin one pixel to the max and one to the min
    merge_img[0,0,1] = -0.5  # so the value range is fixed when saving
    imageio.imwrite(path, merge_img)
    return
Code Example #28
File: image.py Project: woozey/hyperspy
def file_writer(filename, signal, file_format='png', **kwds):
    """Writes data to any format supported by PIL

        Parameters
        ----------
        filename: str
        signal: a Signal instance
        file_format : str
            The file format, defined by its extension, which may be any format
            supported by PIL.
    """
    data = signal.data
    if rgb_tools.is_rgbx(data):
        data = rgb_tools.rgbx2regular_array(data)
    imwrite(filename, data)
Code Example #29
File: visualize.py Project: zsdonghao/tensorlayer
def save_image(image, image_path='_temp.png'):
    """Save a image.

    Parameters
    -----------
    image : numpy array
        [w, h, c]
    image_path : str
        path

    """
    try:  # RGB
        imageio.imwrite(image_path, image)
    except Exception:  # Greyscale
        imageio.imwrite(image_path, image[:, :, 0])
Code Example #30
File: debug_apt_interface.py Project: mkabra/poseTF
def deepcut_outfn(data, outdir, count, fis, save_data):
    # pass count as array to pass it by reference.
    if conf.imgDim == 1:
        im = data[0][:, :, 0]
    else:
        im = data[0]
    img_name = os.path.join(outdir, 'img_{:06d}.png'.format(count[0]))
    imageio.imwrite(img_name, im)
    locs = data[1]
    bparts = conf.n_classes
    for b in range(bparts):
        fis[b].write('{}\t{}\t{}\n'.format(count[0], locs[b, 0], locs[b, 1]))
    mod_locs = np.insert(np.array(locs), 0, range(bparts), axis=1)
    save_data.append([img_name, im.shape, mod_locs])
    count[0] += 1
Code Example #31
File: test.py Project: decei/image
import imageio
import numpy

SEED = 5

numpy.random.seed(SEED)
pix = numpy.random.randint(256, size=(10, 10, 3))
imageio.imwrite("randomii.png", pix.astype(numpy.uint8))  # imwrite returns None, so its result is not assigned

A = numpy.array([1, 2, 3, 4])
B = numpy.array([1, 2, 3, 4])
C = numpy.divide(A, 2)

print(C)
Code Example #32
# Imageio is a library for reading and writing image data, including animated
# images, video, volumetric data, and scientific formats.
########example
##https://imageio.readthedocs.io/en/latest/examples.html
import imageio
im = imageio.imread('imageio:chelsea.png')  # read a standard image
im.shape  # im is a numpy array
# (300, 451, 3)
imageio.imwrite('~/chelsea-gray.jpg', im[:, :, 0])

###
import imageio
reader = imageio.get_reader('imageio:cockatoo.mp4')
for i, im in enumerate(reader):
    print('Mean of frame %i is %1.1f' % (i, im.mean()))
# Make an animated GIF with imageio
import imageio


def create_gif(image_list, gif_name):
    frames = []
    for image_name in image_list:
        frames.append(imageio.imread(image_name))
    # Save them as frames into a gif
    imageio.mimsave(gif_name, frames, 'GIF', duration=0.5)

    return


def main():
    image_list = glob.glob(r"D:\pythonrun\20180504rnn\*.jpg")
    print(image_list)
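    # The snippet is truncated here; presumably main() would end by calling
    # create_gif, roughly as below (the output name is hypothetical, and
    # "import glob" is also needed at the top of the file):
    create_gif(image_list, 'animation.gif')


if __name__ == '__main__':
    main()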
Code Example #33
    with torch.no_grad():
        image, _, _ = render_image(width, height, focal, pose, render_near,
                                   render_far, coarse_model, fine_model,
                                   render_coarse_sample_num,
                                   render_fine_sample_num)
        demo_images.append(image)
        demo_targets.append(target[..., :3])
for pose, target in zip(poses['val']['in'][:2], images['val']['in'][:2]):
    with torch.no_grad():
        image, _, _ = render_image(width, height, focal, pose, render_near,
                                   render_far, coarse_model, fine_model,
                                   render_coarse_sample_num,
                                   render_fine_sample_num)
        demo_images.append(image)
        demo_targets.append(target[..., :3])
for pose, target in zip(poses['val']['ex'][:2], images['val']['ex'][:2]):
    with torch.no_grad():
        image, _, _ = render_image(width, height, focal, pose, render_near,
                                   render_far, coarse_model, fine_model,
                                   render_coarse_sample_num,
                                   render_fine_sample_num)
        demo_images.append(image)
        demo_targets.append(target[..., :3])

demo_image_path = os.path.join(log_path, 'demo.jpg')
demo_image = np.concatenate(
    [np.concatenate(demo_images, 1),
     np.concatenate(demo_targets, 1)], 0)
imageio.imwrite(demo_image_path, to8b(demo_image))
print('Demo image written to:', demo_image_path)
Code Example #34
        8: [244, 35, 232],  # Sidewalks
        9: [107, 142, 35],  # Vegetation
        10: [0, 0, 255],  # Vehicles
        11: [102, 102, 156],  # Walls
        12: [220, 220, 0]  # TrafficSigns
    }

    depth_seg = imageio.imread(file)
    # Build a blank image
    result = np.zeros((depth_seg.shape[0], depth_seg.shape[1], 3))
    # Person
    result[np.where(depth_seg[:, :, 0] == 4)] = [255, 0, 0]
    # Combine road and roadlines
    result[np.where(depth_seg[:, :, 0] == 6)] = [0, 255, 0]
    result[np.where(depth_seg[:, :, 0] == 7)] = [0, 255, 0]
    # Car and remove car hood
    result[np.where(depth_seg[:490, :, 0] == 10)] = [0, 0, 255]
    return result.astype(np.uint8)


# Sem seg
total_num = 0
for i, file in enumerate(glob(seg_path)):
    file_name = os.path.basename(file)
    result = labels_to_seg2(file)

    imageio.imwrite(seg_out_dir + '/' + file_name, result, format='png')
    total_num += 1

print(total_num, "images have been processed.")
Code Example #35
print(tf.__version__)

# tfds works in both Eager and Graph modes
tf.enable_eager_execution()

# tf_flowers total = 3,670
# img_dim = 50

# See available datasets
print(tfds.list_builders())

class_names = ['dandelion', 'daisy', 'tulips', 'sunflowers', 'roses']
ds_all = tfds.load(name="tf_flowers", split="train")
# ds_all = ds_all.shuffle(buffer_size=100)
iterator = ds_all.make_one_shot_iterator()

for i in range(3670):
    if i % 100 == 0:
        print("processing " + str(i))
    mnist_example = iterator.get_next()
    # mnist_example, = ds_all.take(1)
    image, label = mnist_example["image"], mnist_example["label"]
    image = image / 255
    image = tf.image.resize_images(image, (img_dim, img_dim))
    # image = image.reshape(1, 50, 50)
    # image = image_fit.resize_to_fit(image, 12, 22)
    # image = tf.image.rgb_to_grayscale(image)
    label_name = label_number_to_name(label)
    # convert the float tensor in [0, 1] back to uint8 before writing
    image = tf.cast(image * 255, tf.uint8).numpy()
    imageio.imwrite(imgs_folder + "/" + label_name + "/" + str(i) + ".jpg",
                    image)
Code Example #36
import cv2
import imageio
imageio.plugins.ffmpeg.download()

# Load the cascades
face_cascade = cv2.CascadeClassifier('haarcascade-frontalface-default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade-eye.xml')


# Function that performs the detection
def detect(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, 3)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0),
                          2)
    return frame


# Face and eye detection is performed on the input image in the project folder.
# The result is then written to the output image file.
# You can change the file names.
image = imageio.imread('2.jpeg')
image = detect(frame=image)
imageio.imwrite('output.png', image)
Code Example #37
        #plt.imshow(full_inputs_test[i+n])
    '''
    current_directory = os.getcwd()
    final_directory = os.path.join(current_directory, r'cnn_train')
    if not os.path.exists(final_directory):
        os.makedirs(final_directory)

    for i in range(n):
        test_directory = os.path.join(final_directory,
                                      r'cnn_train' + str(i) + '/')
        #test_directory = os.path.join(final_directory, r'test'+str(i+n) +'/')
        if not os.path.exists(test_directory):
            os.makedirs(test_directory)

        imwrite(
            str(test_directory) + 'pred_mask.png',
            (np.clip(combined_masks[i], 0, 1) * 255).astype(np.uint8))
        imwrite(
            str(test_directory) + 'input.png',
            (combined_input[i] * 255).astype(np.uint8))
        imwrite(
            str(test_directory) + 'pred_mask_TH.png',
            (thresh_masks[i] * 255).astype(np.uint8))
        imwrite(
            str(test_directory) + 'GT_mask.png',
            (combined_GT_masks[i] * 255).astype(np.uint8))

    n = 15
    n_patches = n * (512 // roi)**2

    test_mask = sess.run(pred_mask_test,
Code Example #38
def make_efficient_example(ex):
    image_relpath = ex.image_path
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.2
    base_dst_side = 256

    box_center = boxlib.center(ex.bbox)
    s, c = np.sin(max_rotate), np.cos(max_rotate)
    w, h = ex.bbox[2:]
    rot_bbox_side = max(c * w + s * h, c * h + s * w)
    rot_bbox = boxlib.box_around(box_center, rot_bbox_side)

    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor,
                       1)
    expansion_factor = padding_factor * shift_factor * scale_down_factor
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox,
                                     np.array([0, 0, 2048, 2048]))

    new_camera = ex.camera.copy()
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()
    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])

    new_im_relpath = ex.image_path.replace('3dhp', '3dhp_downscaled')
    new_im_path = os.path.join(paths.DATA_ROOT, new_im_relpath)
    if not (util.is_file_newer(new_im_path, "2019-11-14T23:32:07")
            and improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(f'{paths.DATA_ROOT}/{image_relpath}')
        new_im = cameralib.reproject_image(im, ex.camera, new_camera,
                                           dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(
        ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])

    mask_rle_relpath = new_im_path.replace('Images', 'FGmaskImages').replace(
        '.jpg', '.pkl')
    mask_rle_path = os.path.join(paths.DATA_ROOT, mask_rle_relpath)
    if util.is_file_newer(mask_rle_path, "2020-03-11T20:46:46"):
        mask_runlength = util.load_pickle(mask_rle_path)
    else:
        mask_relpath = ex.image_path.replace('Images', 'FGmaskImages').replace(
            '.jpg', '.png')
        mask = imageio.imread(os.path.join(paths.DATA_ROOT, mask_relpath))
        mask_reproj = cameralib.reproject_image(mask, ex.camera, new_camera,
                                                dst_shape)
        mask_runlength = get_mask_with_highest_iou(mask_reproj, new_bbox)
        util.dump_pickle(mask_runlength, mask_rle_path)

    return p3ds.Pose3DExample(new_im_relpath,
                              ex.world_coords,
                              new_bbox,
                              new_camera,
                              mask=mask_runlength,
                              univ_coords=ex.univ_coords)
Code Example #39
File: throw_ai.py Project: sholtodouglas/ur5pybullet
def save_image(rgb):

    imageio.imwrite('rgb.jpg', rgb)
Code Example #40
            
            X = torch.cat([
                torch.zeros(1,3,360,420),
                X,
                torch.zeros(1,3,360,420)                
            ], axis=0).unsqueeze(0).cuda()

            
        else:
            
            im1 = to_tensor(Image.open(os.path.join(folderpath, 'frame09.png')))
            im2 = to_tensor(Image.open(os.path.join(folderpath, 'frame10.png')))
            im3 = to_tensor(Image.open(os.path.join(folderpath, 'frame11.png')))
            im4 = to_tensor(Image.open(os.path.join(folderpath, 'frame12.png')))
            
            X = torch.stack([im1, im2, im3, im4]).unsqueeze(0).cuda()
            
            
        y_hat = model(X).clamp(0,1).mul(255).cpu().detach().int().squeeze(0).permute(1,2,0)
        y_hat = y_hat.numpy().astype(np.uint8)
        print(seq)
        t1 = time.time()
        output_folder_sequence = os.path.join(OUTPUT_FOLDER, seq)
        os.makedirs(output_folder_sequence, exist_ok=True)
        imageio.imwrite(os.path.join(output_folder_sequence, 'frame10i11.png'), im=y_hat)
        
        if seq=='Urban':
            print(f'Running time Urban: {t1-t0} seconds')
        

Code Example #41
# Normalization
max_value = img_slice.max()
img_slice = img_slice / max_value
img_slice = img_slice * 255
img_slice_uint8 = img_slice.astype(np.uint8)


nii_filename = input_full_filename

#extract_all_slices_to_png(nii_filename)
"""

#TEST 2
# Saving image
"""
import imageio as iio
iio.imwrite('teste.png',img_slice_uint8)


print('Shape of the adni image data matrix: ' + repr(adni_img_data.shape))
print('\nType of the img_slice variable: ' + repr(type(img_slice)))
print('png file full path: ' + repr(png_full_filename))


#png_img = Image.fromarray(img_slice)
#png_img.save("adni_png.jpeg")
#plt.imsave('adni.png',png_img)

print('Data type of the img_slice array: ' + repr(type(img_slice)))
Code Example #42
File: utils.py Project: huohuotm/MIT6.S094-DeepTesla
def visualize(epoch_id,
              machine_steering,
              out_dir,
              perform_smoothing=False,
              verbose=False,
              verbose_progress_step=100,
              frame_count_limit=None):
    epoch_dir = params.data_dir
    human_steering = get_human_steering(epoch_id)
    assert len(human_steering) == len(machine_steering)

    # testing: artificially magnify steering to test steering wheel visualization
    # human_steering = list(np.array(human_steering) * 10)
    # machine_steering = list(np.array(machine_steering) * 10)

    # testing: artificially alter machine steering to test that the disagreement coloring is working
    # delta = 0
    # for i in xrange(len(machine_steering)):
    #     delta += random.uniform(-1, 1)
    #     machine_steering[i] += delta

    if perform_smoothing:
        machine_steering = list(smooth(np.array(machine_steering)))

    steering_min = min(np.min(human_steering), np.min(machine_steering))
    steering_max = max(np.max(human_steering), np.max(machine_steering))

    assert os.path.isdir(epoch_dir)

    # use mp4 instead of mkv
    # front_vid_path = join_dir(epoch_dir, 'epoch{:0>2}_front.mkv'.format(epoch_id))
    vid_mp4_path = join_dir(epoch_dir,
                            'epoch{:0>2}_front.mp4'.format(epoch_id))
    assert os.path.isfile(vid_mp4_path)

    dash_vid_path = join_dir(epoch_dir,
                             'epoch{:0>2}_dash.mkv'.format(epoch_id))
    dash_exists = os.path.isfile(dash_vid_path)

    # front_cap = cv2.VideoCapture(front_vid_path)
    dash_cap = cv2.VideoCapture(dash_vid_path) if dash_exists else None

    assert os.path.isdir(out_dir)
    vid_size = video_resolution_to_size('720p', width_first=True)
    # out_path = join_dir(out_dir, 'epoch{:0>2}_human_machine.mkv'.format(epoch_id))
    # vw = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc('X','2','6','4'), 30, vid_size)
    w, h = vid_size

    # -----------------using imageio read/write video---------------------------------#
    import imageio
    out_path_mp4 = join_dir(out_dir,
                            'epoch{:0>2}_human_machine.mp4'.format(epoch_id))
    reader = imageio.get_reader(vid_mp4_path, 'ffmpeg')
    video_fps = reader.get_meta_data()['fps']
    writer = imageio.get_writer(out_path_mp4, fps=video_fps)
    # --------------------------------------------------------------------------------#

    # for f_cur in xrange(len(machine_steering)):
    for f_cur in range(len(machine_steering)):
        if (f_cur != 0) and (f_cur % verbose_progress_step == 0):
            print('completed {} of {} frames'.format(f_cur,
                                                     len(machine_steering)))

        if (frame_count_limit is not None) and (f_cur >= frame_count_limit):
            break

        # rret, rimg = front_cap.read()
        # assert rret

        rimg = reader.get_data(f_cur)

        if dash_exists:
            dret, dimg = dash_cap.read()
            assert dret
        else:
            dimg = rimg.copy()
            dimg[:] = (0, 0, 0)

        ry0, rh = 80, 500
        dimg = dimg[100:, :930]
        dimg = cv2_resize_by_height(dimg, h - rh)

        fimg = rimg.copy()
        fimg[:] = (0, 0, 0)
        fimg[:rh] = rimg[ry0:ry0 + rh]
        dh, dw = dimg.shape[:2]
        fimg[rh:, :dw] = dimg[:]

        ########################## plot ##########################
        plot_size = (500, dh)
        win_before, win_after = 150, 150

        xx, hh, mm = [], [], []
        # for f_rel in xrange(-win_before, win_after+1):
        for f_rel in range(-win_before, win_after + 1):
            f_abs = f_cur + f_rel
            if f_abs < 0 or f_abs >= len(machine_steering):
                continue
            xx.append(f_rel / 30)
            hh.append(human_steering[f_abs])
            mm.append(machine_steering[f_abs])

        fig = plt.figure()
        axis = fig.add_subplot(1, 1, 1)

        steering_range = max(abs(steering_min), abs(steering_max))
        #ylim = [steering_min, steering_max]
        ylim = [-steering_range, steering_range]
        # ylim[0] = min(np.min(hh), np.min(mm))
        # ylim[1] = max(np.max(hh), np.max(mm))

        axis.set_xlabel('Current Time (secs)')
        axis.set_ylabel('Steering Angle')
        axis.axvline(x=0, color='k', ls='dashed')
        axis.plot(xx, hh)
        axis.plot(xx, mm)
        axis.set_xlim([-win_before / 30, win_after / 30])
        axis.set_ylim(ylim)
        #axis.set_ylabel(y_label, fontsize=18)
        axis.label_outer()
        #axes.append(axis)

        buf = io.BytesIO()
        # http://stackoverflow.com/a/4306340/627517
        sx, sy = plot_size
        sx, sy = round(sx / 100, 1), round(sy / 100, 1)

        fig.set_size_inches(sx, sy)
        fig.tight_layout()
        fig.savefig(buf, format="png", dpi=100)
        buf.seek(0)
        buf_img = PIL.Image.open(buf)
        pimg = np.asarray(buf_img)
        plt.close(fig)

        pimg = cv2.resize(pimg, plot_size)
        pimg = pimg[:, :, :3]

        ph, pw = pimg.shape[:2]
        pimg = 255 - pimg

        fimg[rh:, -pw:] = pimg[:]

        ####################### human steering wheels ######################
        wimg = imread(os.path.abspath("images/wheel-tesla-image-150.png"),
                      cv2.IMREAD_UNCHANGED)

        human_wimg = rotate_image(wimg, -human_steering[f_cur])
        wh, ww = human_wimg.shape[:2]
        fimg = overlay_image(fimg,
                             human_wimg,
                             y_offset=rh + 50,
                             x_offset=dw + 60)

        ####################### machine steering wheels ######################
        disagreement = abs(machine_steering[f_cur] - human_steering[f_cur])
        machine_wimg = rotate_image(wimg, -machine_steering[f_cur])
        red_machine_wimg = machine_wimg.copy()
        green_machine_wimg = machine_wimg.copy()
        red_machine_wimg[:, :, 2] = 255
        green_machine_wimg[:, :, 1] = 255
        #r = disagreement / (steering_max - steering_min)
        max_disagreement = 10
        r = min(1., disagreement / max_disagreement)
        g = 1 - r
        assert r >= 0
        assert g <= 1
        machine_wimg = cv2.addWeighted(red_machine_wimg, r, green_machine_wimg,
                                       g, 0)
        wh, ww = machine_wimg.shape[:2]
        fimg = overlay_image(fimg,
                             machine_wimg,
                             y_offset=rh + 50,
                             x_offset=dw + 260)

        ####################### text ######################
        timg_green_agree = imread(
            os.path.abspath("images/text-green-agree.png"),
            cv2.IMREAD_UNCHANGED)
        timg_ground_truth = imread(
            os.path.abspath("images/text-ground-truth.png"),
            cv2.IMREAD_UNCHANGED)
        timg_learned_control = imread(
            os.path.abspath("images/text-learned-control.png"),
            cv2.IMREAD_UNCHANGED)
        timg_red_disagree = imread(
            os.path.abspath("images/text-red-disagree.png"),
            cv2.IMREAD_UNCHANGED)
        timg_tesla_control_autopilot = imread(
            os.path.abspath("images/text-tesla-control-autopilot.png"),
            cv2.IMREAD_UNCHANGED)
        timg_tesla_control_human = imread(
            os.path.abspath("images/text-tesla-control-human.png"),
            cv2.IMREAD_UNCHANGED)

        fimg = overlay_image(fimg,
                             timg_tesla_control_autopilot,
                             y_offset=rh + 8,
                             x_offset=dw + 83)
        fimg = overlay_image(fimg,
                             timg_learned_control,
                             y_offset=rh + 8,
                             x_offset=dw + 256)
        fimg = overlay_image(fimg,
                             timg_ground_truth,
                             y_offset=rh + 205,
                             x_offset=dw + 90)
        fimg = overlay_image(fimg,
                             timg_red_disagree,
                             y_offset=rh + 205,
                             x_offset=dw + 230)
        fimg = overlay_image(fimg,
                             timg_green_agree,
                             y_offset=rh + 205,
                             x_offset=dw + 345)

        if (frame_count_limit is not None) and (frame_count_limit == 1):
            # cv2.imwrite(out_path.replace('mkv', 'jpg'), fimg)
            imageio.imwrite(out_path_mp4.replace('mp4', 'jpg'), fimg)
            sys.exit()

        writer.append_data(fimg)
        # vw.write(fimg)

    # front_cap.release()
    if dash_exists:
        dash_cap.release()
    # vw.release()

    writer.close()
Code Example #43
File: transformpict.py Project: ukonline/uCourse
# transformpict.py
# Author: Sébastien Combéfis
# Version: April 15, 2020

import imageio
import numpy as np

# Opening the tiger picture you can find here:
# https://www.flickr.com/photos/31004716@N00/3732644607
# The returned ndarray has three dimensions: the width, the height and
# the number of layers (3 for RGB picture, 4 for RGBA picture, etc.)
im = imageio.imread('tiger.jpg')

# Darkening the picture by dividing the RGB values of each pixel by 2
# and ensuring the values remain non-negative
im = np.maximum(0, im / 2)

# Writing the new picture without forgetting to convert the values
# from float to uint8 (because of the operations previously performed)
imageio.imwrite('tiger-transformed.jpg', im.astype(np.uint8))
Code Example #44
import os
import sys
from imageio import imread, imwrite
from skimage.transform import resize

target_dir = './data/aircraft' if len(sys.argv) < 2 else sys.argv[1]
img_size = [84, 84]

_ids = []

for root, dirnames, filenames in os.walk(target_dir):
    for filename in filenames:
        if filename.endswith(('.jpg', '.webp', '.JPEG', '.png', 'jpeg')):
            _ids.append(os.path.join(root, filename))

for i, path in enumerate(_ids):
    img = imread(path)
    print('{}/{} size: {}'.format(i, len(_ids), img.shape))
    # resize() returns floats in [0, 1]; convert back to uint8 before writing
    imwrite(path, (resize(img, img_size) * 255).astype('uint8'))
Code Example #45
File: debug.py Project: zahidaramai/imgaug
 def receive(self, image):
     imageio.imwrite(self._filepath, image)
Code Example #46
def runTimings(mode: str):
    # CONFIGURATION
    SETTINGS_FILE = "../scenes/dvrCassini-scene1.json"
    ROOT_PATH = ".."
    RESOLUTION = (512, 512)
    NUM_FRAMES = 1
    VOLUME_RESOLUTION = 128
    KERNEL_NAMES = [
        ("DVR: DDA - fixed step (control points)",
         "dvr stepping 0.0001\n(Baseline)", 0.001),
        ("DVR: Fixed step size - trilinear", "stepping 0.1", 0.1),
        ("DVR: Fixed step size - trilinear", "stepping 0.01", 0.01),
        ("DVR: Fixed step size - trilinear", "stepping 0.001", 0.001),
        ("DVR: DDA - interval simple", "interval - simple", 1),
        ("DVR: DDA - interval stepping (3)", "interval - stepping-3", 1),
        ("DVR: DDA - interval trapezoid (2)", "interval - trapezoid-2", 1),
        ("DVR: DDA - interval trapezoid (4)", "interval - trapezoid-4", 1),
        ("DVR: DDA - interval trapezoid (10)", "interval - trapezoid-10", 1),
        ("DVR: DDA - interval Simpson (2)", "interval - Simpson-2", 1),
        ("DVR: DDA - interval Simpson (4)", "interval - Simpson-4", 1),
        ("DVR: DDA - interval Simpson (10)", "interval - Simpson-10", 1),
        ("DVR: DDA - interval Simpson adapt",
         "interval - Simpson-adaptive e-3", 1e-3 * VOLUME_RESOLUTION),
        ("DVR: DDA - interval Simpson adapt",
         "interval - Simpson-adaptive e-5", 1e-5 * VOLUME_RESOLUTION),
        #("DVR: DDA - interval trapezoid var", "interval - trapezoid-var 0.1", 0.1),
        #("DVR: DDA - interval trapezoid var", "interval - trapezoid-var 0.01", 0.01),
        #("DVR: DDA - interval trapezoid var", "interval - trapezoid-var 0.001", 0.001),
        ("DVR: Marching Cubes", "marching cubes 1", 1 / 1 - 0.001
         ),  # number of subdivisions
        ("DVR: Marching Cubes", "marching cubes 2", 1 / 2 - 0.001),
        ("DVR: Marching Cubes", "marching cubes 4", 1 / 4 - 0.001),
        ("DVR: Marching Cubes", "marching cubes 8", 1 / 8 - 0.001),
        ("DVR: Marching Cubes", "marching cubes 16", 1 / 16 - 0.001),
    ]
    DENSITY_STEPS = 7
    MIN_DENSITY_DIFFERENCE = 0.005  # minimal difference between min and max density
    TIMING_STEPS = 50
    OUTPUT_STATS_ALL = "../results/statistics/dvr-cassini/timings-all-%s.tsv"
    OUTPUT_STATS_AVG = "../results/statistics/dvr-cassini/timings-avg-%s.tsv"
    OUTPUT_HISTO_ALL = "../results/statistics/dvr-cassini/histograms-%s.tsv"
    OUTPUT_HISTO_CFG = "../results/statistics/dvr-cassini/histogram-cfg-%s.tsv"
    OUTPUT_STATS_USE_DOUBLE = False
    OUTPUT_IMAGE_PATH = "../results/statistics/dvr-cassini/images/"
    OUTPUT_INSTRUMENTATION = "../results/statistics/dvr-cassini/instrumentation.tsv"

    HISTO_NUM_BINS = 100
    HISTO_BIN_MIN = np.log10(1e-6)
    HISTO_BIN_MAX = np.log10(1)
    HISTO_BIN_EDGES = [0.0] + list(10**np.linspace(
        HISTO_BIN_MIN, HISTO_BIN_MAX, HISTO_NUM_BINS))
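    # The edges are logarithmically spaced between 1e-6 and 1, with an extra
    # leading [0, 1e-6) bin so that exact-zero differences are counted too.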
    print("histogram bins:", HISTO_BIN_EDGES)

    pyrenderer.oit.set_fragment_buffer_size(2**26)
    pyrenderer.oit.set_marching_cubes_mode(
        pyrenderer.oit.MarchingCubesComputationMode.OnTheFly)
    pyrenderer.oit.set_max_fragments_per_pixel(256)
    pyrenderer.oit.set_tile_size(256)

    os.makedirs(OUTPUT_IMAGE_PATH, exist_ok=True)

    # load settings file
    rendererArgs, camera, volumePath = pyrenderer.load_from_json(
        SETTINGS_FILE, ROOT_PATH)
    print("settings loaded")
    rendererArgs.width = RESOLUTION[0]
    rendererArgs.height = RESOLUTION[1]
    base_min_density = rendererArgs.min_density
    base_max_density = rendererArgs.max_density
    base_opacity = rendererArgs.opacity_scaling

    # create density+opacity test cases
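    # The max densities shrink log-spaced towards the TF midpoint while the
    # opacity is scaled up by the shrinking range, so the rendered appearance
    # stays roughly comparable across test cases.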
    end_max_density = 0.5 * (base_min_density +
                             base_max_density) + MIN_DENSITY_DIFFERENCE
    max_densities = np.power(
        10,
        np.linspace(np.log10(base_max_density), np.log10(end_max_density),
                    DENSITY_STEPS))
    scaling = (base_max_density - base_min_density) / (max_densities -
                                                       base_min_density)
    end_min_density = 0.5 * (base_min_density +
                             base_max_density) - MIN_DENSITY_DIFFERENCE
    min_densities = np.log10(
        np.linspace(np.power(10, base_min_density),
                    np.power(10, end_min_density), DENSITY_STEPS))
    scaling = (base_max_density - base_min_density) / (max_densities -
                                                       min_densities)
    opacities = base_opacity * scaling
    print("min_densities:", min_densities)
    print("max_densities:", max_densities)
    print("opacities:", opacities)

    # create volume
    print("Create Marschner Lobb")
    volume = pyrenderer.Volume.create_implicit(
        pyrenderer.ImplicitEquation.Cassini, VOLUME_RESOLUTION)
    print("Loaded volumed of resolution", volume.resolution, "and world size",
          volume.world_size)
    volume.copy_to_gpu()

    # allocate timing
    timer = pyrenderer.GpuTimer()
    times = [[None] * DENSITY_STEPS for i in range(len(KERNEL_NAMES))]

    if mode == "visualize":
        # allocate output
        pyrenderer.reload_kernels(enableDebugging=False,
                                  enableInstrumentation=False)
        output = pyrenderer.allocate_output(rendererArgs.width,
                                            rendererArgs.height,
                                            rendererArgs.render_mode)
        outputs = [[None] * DENSITY_STEPS for i in range(len(KERNEL_NAMES))]

        # render
        camera.update_render_args(rendererArgs)
        for j, (kernel_name, _, stepsize) in enumerate(KERNEL_NAMES):
            print("Render", kernel_name, stepsize)
            rendererArgs.stepsize = stepsize
            for i in range(DENSITY_STEPS):
                rendererArgs.min_density = min_densities[i]
                rendererArgs.max_density = max_densities[i]
                rendererArgs.opacity_scaling = opacities[i]
                timer.start()
                pyrenderer.render(kernel_name, volume, rendererArgs, output)
                timer.stop()
                outputs[j][i] = np.array(output.copy_to_cpu())
                times[j][i] = timer.elapsed_ms()

        def slugify(value):
            """
            Normalizes string, converts to lowercase, removes non-alpha characters,
            and converts spaces to hyphens.
            """
            import unicodedata
            import re
            value = str(unicodedata.normalize(
                'NFKD', value))  #.encode('ascii', 'ignore'))
            value = re.sub(r'[^\w\s-]', '', value).strip().lower()
            value = re.sub(r'[-\s]+', '-', value)
            return value

        # visualize
        print("Visualize")
        #fig, axes = plt.subplots(nrows=len(KERNEL_NAMES), ncols=DENSITY_STEPS)
        for j, (kernel_name, human_kernel_name, _) in enumerate(KERNEL_NAMES):
            for i in range(DENSITY_STEPS):
                filename = os.path.join(
                    OUTPUT_IMAGE_PATH,
                    slugify("%s__%d" % (human_kernel_name, i)) + ".png")
                imageio.imwrite(filename, outputs[j][i][:, :, 0:4])

                #axes[j][i].imshow(outputs[j][i][:,:,0:4])
                #axes[j][i].set_title("time=%.2fms"%times[j][i])
                #if j==len(KERNEL_NAMES)-1:
                #    axes[j][i].set_xlabel("range=%.3f"%(
                #        max_densities[i]-base_min_density))
                #if i==0:
                #    axes[j][i].set_ylabel(human_kernel_name)

        # save to numpy
        npz_output = {}
        npz_output['kernels'] = KERNEL_NAMES
        npz_output['densities'] = [
            max_densities[i] - base_min_density for i in range(DENSITY_STEPS)
        ]
        for j in range(len(KERNEL_NAMES)):
            for i in range(DENSITY_STEPS):
                npz_output['img_%d_%d' % (j, i)] = outputs[j][i]
        np.savez(os.path.join(OUTPUT_IMAGE_PATH, "raw.npz"), **npz_output)

        #plt.subplots_adjust(left=0.03, bottom=0.05, right=0.99, top=0.97, wspace=0.20, hspace=0.23)
        #plt.show()

    elif mode == "measure":
        summed_times = [[0] * DENSITY_STEPS for i in range(len(KERNEL_NAMES))]

        pyrenderer.reload_kernels(enableDebugging=False,
                                  enableInstrumentation=False,
                                  otherPreprocessorArguments=[
                                      "-DKERNEL_USE_DOUBLE=%s" %
                                      ("1" if OUTPUT_STATS_USE_DOUBLE else "0")
                                  ])
        # allocate output for baseline
        output = pyrenderer.allocate_output(rendererArgs.width,
                                            rendererArgs.height,
                                            rendererArgs.render_mode)
        outputs = [[None] * TIMING_STEPS for i in range(DENSITY_STEPS)]

        histograms = [[
            np.zeros(HISTO_NUM_BINS, dtype=np.int64)
            for i in range(DENSITY_STEPS)
        ] for j in range(len(KERNEL_NAMES))]

        # render and write output
        with open(
                OUTPUT_STATS_ALL %
            ("double" if OUTPUT_STATS_USE_DOUBLE else "float"), "w") as f:
            f.write("Kernel Name\tTF-Range\tFrame\tTime (ms)\tPSNR (dB)\n")
            for j, (kernel_name, human_kernel_name,
                    stepsize) in enumerate(KERNEL_NAMES):
                print("Render", kernel_name, stepsize)
                rendererArgs.stepsize = stepsize
                for i in range(DENSITY_STEPS):
                    rendererArgs.min_density = min_densities[i]
                    rendererArgs.max_density = max_densities[i]
                    rendererArgs.opacity_scaling = opacities[i]
                    histogram = histograms[j][i]
                    histogram_edges = None
                    for k in range(TIMING_STEPS):
                        camera.yaw = k * 360 / TIMING_STEPS
                        camera.update_render_args(rendererArgs)
                        timer.start()
                        pyrenderer.render(kernel_name, volume, rendererArgs,
                                          output)
                        timer.stop()
                        out_img = np.array(output.copy_to_cpu())
                        if j == 0:  # baseline
                            outputs[i][k] = out_img
                            psnr = 0
                        else:
                            # compute psnr
                            maxValue = np.max(outputs[i][k][:, :, 0:4])
                            mse = ((outputs[i][k][:, :, 0:4] -
                                    out_img[:, :, 0:4])**2).mean(axis=None)
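                            # PSNR in dB: 20*log10(peak) - 10*log10(MSE)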
                            psnr = 20 * np.log10(maxValue) - 10 * np.log10(mse)
                            # compute histogram
                            diff = outputs[i][k][:, :, 0:4] - out_img[:, :,
                                                                      0:4]
                            new_histogram, histogram_edges = np.histogram(
                                diff, bins=HISTO_BIN_EDGES)
                            histogram += new_histogram
                        t = timer.elapsed_ms()
                        summed_times[j][i] += t
                        f.write(
                            "%s\t%.4f\t%d\t%.4f\t%.4f\n" %
                            (human_kernel_name.replace("\n", " "),
                             max_densities[i] - base_min_density, k, t, psnr))

        # write average stats
        with open(
                OUTPUT_STATS_AVG %
            ("double" if OUTPUT_STATS_USE_DOUBLE else "float"), "w") as f:
            f.write("Kernel Name\tTF-Range\tAvg-Time (ms)\n")
            for j, (_, human_kernel_name, _) in enumerate(KERNEL_NAMES):
                for i in range(DENSITY_STEPS):
                    f.write("%s\t%.4f\t%.4f\n" %
                            (human_kernel_name.replace("\n", " "),
                             max_densities[i] - base_min_density,
                             summed_times[j][i] / TIMING_STEPS))

        # write histograms
        with open(
                OUTPUT_HISTO_ALL %
            ("double" if OUTPUT_STATS_USE_DOUBLE else "float"), "w") as f:
            f.write("BinStart\tBinEnd")
            for j in range(len(KERNEL_NAMES)):
                for i in range(DENSITY_STEPS):
                    f.write("\t%d-%d" % (j, i))
            f.write("\n")
            for b in range(HISTO_NUM_BINS):
                f.write("%.10f\t%.10f" %
                        (HISTO_BIN_EDGES[b], HISTO_BIN_EDGES[b + 1]))
                for j in range(len(KERNEL_NAMES)):
                    for i in range(DENSITY_STEPS):
                        f.write("\t%d" % histograms[j][i][b])
                f.write("\n")
        with open(
                OUTPUT_HISTO_CFG %
            ("double" if OUTPUT_STATS_USE_DOUBLE else "float"), "w") as f:
            f.write("Kernel Name\tTF-Range\tConfig-ID\n")
            for j, (_, human_kernel_name, _) in enumerate(KERNEL_NAMES):
                for i in range(DENSITY_STEPS):
                    f.write("%s\t%.4f\t%s\n" %
                            (human_kernel_name.replace("\n", " "),
                             max_densities[i] - base_min_density, "%d-%d" %
                             (j, i)))

    elif mode == "instrumentation":
        # recompile with instrumentation
        pyrenderer.reload_kernels(enableInstrumentation=True)

        # allocate output
        output = pyrenderer.allocate_output(rendererArgs.width,
                                            rendererArgs.height,
                                            rendererArgs.render_mode)
        outputs = [[None] * DENSITY_STEPS for i in range(len(KERNEL_NAMES))]

        fields = [
            "densityFetches", "tfFetches", "ddaSteps", "isoIntersections",
            "intervalEval", "intervalStep", "intervalMaxStep"
        ]

        # render
        with open(OUTPUT_INSTRUMENTATION, "w") as f:
            f.write(
                "Kernel Name\tTF-Range\t%s\tavgIntervalSteps\tnumTriangles\tnumFragments\n"
                % "\t".join(fields))
            camera.update_render_args(rendererArgs)
            for j, (kernel_name, human_kernel_name,
                    stepsize) in enumerate(KERNEL_NAMES):
                print("Render", kernel_name, stepsize)
                rendererArgs.stepsize = stepsize
                for i in range(DENSITY_STEPS):
                    rendererArgs.min_density = min_densities[i]
                    rendererArgs.max_density = max_densities[i]
                    rendererArgs.opacity_scaling = opacities[i]
                    instrumentations, globalInstrumentations = \
                        pyrenderer.render_with_instrumentation(
                            kernel_name, volume, rendererArgs, output)
                    f.write("%s\t%s" % (human_kernel_name.replace(
                        "\n", " "), max_densities[i] - base_min_density))
                    for field in fields:
                        f.write(
                            "\t%.2f" %
                            np.mean(instrumentations[field], dtype=np.float32))
                    avgIntervalSteps = np.mean(
                        instrumentations["intervalStep"].astype(np.float32) /
                        instrumentations["intervalEval"].astype(np.float32))
                    f.write(
                        "\t%.2f\t%d\t%d\n" %
                        (avgIntervalSteps, globalInstrumentations.numTriangles,
                         globalInstrumentations.numFragments))
                    f.flush()
コード例 #47
0
# input_folder = "/home/koosk/data/images/adipocyte/dataset/60x-nuclei_thresholded"
# output_folder = "/home/koosk/data/images/adipocyte/dataset/60x-nuclei_thresholded-tiled"
# input_folder = "/home/koosk/data/images/adipocyte/nuclei_mask-20x"
# input_folder = "/home/koosk/data/images/adipocyte/nuclei_mask-20x-tiled"
tile_size = 512

if __name__ == '__main__':
    os.makedirs(output_folder, exist_ok=True)
    files = listdir(input_folder)
    for file in files:
        input_file = join(input_folder, file)
        if not isfile(input_file):
            continue
        img = imageio.imread(input_file).astype(np.float32)
        img = img - np.min(img)
        img = img / np.max(img) * 255
        img = img.astype('uint8')
        im_bname = os.path.basename(input_file)
        imshape = np.shape(img)
        im_name = im_bname[:-4]
        im_ext = im_bname[-3:]
        tile_counter = 0
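        # walk the image in non-overlapping tile_size blocks; remainder pixels
        # at the right and bottom edges are discarded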
        for i in range(0, imshape[0] - tile_size + 1, tile_size):
            for j in range(0, imshape[1] - tile_size + 1, tile_size):
                tile_counter += 1
                img_tile = img[i:i + tile_size, j:j + tile_size]
                tile_fname = join(
                    output_folder,
                    im_name + "-tile-%03d" % tile_counter + ".png")
                imageio.imwrite(tile_fname, img_tile, "png")
コード例 #48
0
        optmize_Com.zero_grad()
        if opt.feature_loss == True:
            vgg_loss = VGG_loss(generated_img,input_label)
        else:
            vgg_loss = 0
        mse_loss = Critiretion(generated_img, input_label)
        Com_loss = vgg_loss + alpha * mse_loss + mu / 2.0 * torch.norm(latent_vector - Z + eta, 2) ** 2 / \
                   latent_vector.shape[0]
        Com_loss.backward()
        optmize_Com.step()
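        # ADMM-style scaled-dual update (inferred from the structure): project
        # onto the quantized set, then accumulate the residual in eta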
        with torch.no_grad():
            Z = Quantizer(latent_vector + eta, "Hard")
            eta = eta + latent_vector - Z
    generated_img = model.netDecoder(Quantizer(latent_vector, "Hard"))
    for index in range(input_label.shape[0]):
        gen_img = generated_img[index].detach().cpu().numpy()
        org_img = input_label[index].detach().cpu().numpy()
        gen_img = gen_img * 0.5 + 0.5  # map from [-1, 1] back to [0, 1]
        org_img = org_img * 0.5 + 0.5
        gen_img = np.exp(10 * (gen_img))
        org_img = np.exp(10 * (org_img))
        inverse_gen = np.abs(nnls(inverse_matrix, gen_img[0, :, :]))
        inverse_org = np.abs(nnls(inverse_matrix, org_img[0, :, :]))
        inverse_gen_img = (inverse_gen / np.max(inverse_gen.flatten()) * 65535).astype(np.uint16)
        inverse_org_img = (inverse_org / np.max(inverse_org.flatten()) * 65535).astype(np.uint16)
        short_path = ntpath.basename(data['path'][index])
        name_ = os.path.splitext(short_path)[0]
        imageio.imwrite(os.path.join(output_path, name_ + '_syn.png'), inverse_gen_img)
        imageio.imwrite(os.path.join(output_path, name_ + '_real.png'), inverse_org_img)

コード例 #49
0
    res = yolo_detection.detect_imgs(pkllist, nms=0, thresh=0.25)
    detect_end = time.time()
    print('total detect: {:.4f}s'.format(detect_end - detect_begin))

    # nms
    nms_begin = time.time()
    boxes, classes, scores = dsnms(res)
    nms_end = time.time()
    print('total nms: {:.4f}s'.format(nms_end - nms_begin))

    # save&visualization
    save_begin = time.time()
    PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
    NUM_CLASSES = 80
    if not os.path.exists('video/output'):
        os.makedirs('video/output')
    for i, image_path in enumerate(pkllist):
        image_process = get_labeled_image(image_path, PATH_TO_LABELS,
                                          NUM_CLASSES, np.array(boxes[i]),
                                          np.array(classes[i]),
                                          np.array(scores[i]))
        #plt.imshow(image_process)
        #plt.show()
        #scipy.misc.imsave('video/output/frame{}.jpg'.format(i), image_process)
        #plt.image.imsave('video/output/frame{}.jpg'.format(i), image_process)
        imageio.imwrite('video/output/frame{}.jpg'.format(i), image_process)
        if i % 100 == 0:
            print('finish writing image{}'.format(i))
    save_end = time.time()
    print('total writing images: {:.4f}s'.format(save_end - save_begin))
コード例 #50
0
ファイル: utility.py プロジェクト: codyshen0000/Experiment
 def bg_target(queue):
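     # consume (filename, tensor) pairs from the queue until a None filename
     # sentinel arrives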
     while True:
         if not queue.empty():
             filename, tensor = queue.get()
             if filename is None: break
             imageio.imwrite(filename, tensor.numpy())
コード例 #51
0
    # get images and extract features
    images = []
    labels_list = []
    landmarks = []
    hog_features = []
    hog_images = []
    for i in range(len(samples)):
        try:
            if labels[i] in SELECTED_LABELS and nb_images_per_label[
                    get_new_label(labels[i])] < IMAGES_PER_LABEL:
                image = np.array(samples[i].split(), dtype=int).reshape(
                    (image_height, image_width))
                images.append(image)
                if SAVE_IMAGES:
                    imageio.imwrite(category + '/' + str(i) + '.jpg', image)
                if GET_HOG_WINDOWS_FEATURES:
                    features = sliding_hog_windows(image)
                    f, hog_image = hog(image,
                                       orientations=8,
                                       pixels_per_cell=(16, 16),
                                       cells_per_block=(1, 1),
                                       visualize=True)
                    hog_features.append(features)
                    hog_images.append(hog_image)
                elif GET_HOG_FEATURES:
                    features, hog_image = hog(image,
                                              orientations=8,
                                              pixels_per_cell=(16, 16),
                                              cells_per_block=(1, 1),
                                              visualize=True)
コード例 #52
0
def main(argv):
    inputfile = r'C:/Users/Samantha/Documents/BIGR_Internship/CT_datasets/luu_nii/image/p_p04_int_04.nii'
    outputfile = r'C:/Users/Samantha/Documents/BIGR_Internship/CT_datasets/luu_png/image'
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        print('nii2png.py -i <inputfile> -o <outputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('nii2png.py -i <inputfile> -o <outputfile>')
            sys.exit()
        elif opt in ("-i", "--input"):
            inputfile = arg
        elif opt in ("-o", "--output"):
            outputfile = arg

    print('Input file is ', inputfile)
    print('Output folder is ', outputfile)

    # set fn as your 4d nifti file
    image_array = nibabel.load(inputfile).get_fdata()
    print(len(image_array.shape))
    image_array = image_array.astype(float)

    ########## Uncomment MASK or IMAGE ##########

    # MASK The mask of the LITS also contains segmentation of the tumors, so add the following
    #image_array[image_array > 0] = 1

    # IMAGE
    wl = 50
    ww2 = 175
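    # clamp intensities to the CT window [wl - ww2, wl + ww2], then rescale
    # the windowed values to 0-255 for PNG output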
    image_array[image_array < (wl - ww2)] = (wl - ww2)
    image_array[image_array > (wl + ww2)] = (wl + ww2)
    image_array = (numpy.maximum(image_array, 0) / image_array.max()) * 255.0
    #############################################

    # ask if rotate
    ask_rotate = input('Would you like to rotate the orientation? (y/n) ')

    if ask_rotate.lower() == 'y':
        ask_rotate_num = int(input('OK. By 90° 180° or 270°? '))
        if ask_rotate_num == 90 or ask_rotate_num == 180 or ask_rotate_num == 270:
            print('Got it. Your images will be rotated by {} degrees.'.format(
                ask_rotate_num))
        else:
            print(
                'You must enter a value that is either 90, 180, or 270. Quitting...'
            )
            sys.exit()
    elif ask_rotate.lower() == 'n':
        print('OK, your images will be converted as they are.')
    else:
        print('You must choose either y or n. Quitting...')
        sys.exit()

    # if 4D image inputted
    if len(image_array.shape) == 4:
        # set 4d array dimension values
        nx, ny, nz, nw = image_array.shape

        # set destination folder
        if not os.path.exists(outputfile):
            os.makedirs(outputfile)
            print("Created ouput directory: " + outputfile)

        print('Reading NIfTI file...')

        total_volumes = image_array.shape[3]
        total_slices = image_array.shape[2]

        # iterate through volumes
        for current_volume in range(0, total_volumes):
            slice_counter = 0
            # iterate through slices
            for current_slice in range(0, total_slices):
                if (slice_counter % 1) == 0:
                    # rotate or no rotate
                    if ask_rotate.lower() == 'y':
                        if ask_rotate_num == 90 or ask_rotate_num == 180 or ask_rotate_num == 270:
                            print('Rotating image...')
                            if ask_rotate_num == 90:
                                data = numpy.rot90(image_array[:, :,
                                                               current_slice,
                                                               current_volume])
                            elif ask_rotate_num == 180:
                                data = numpy.rot90(
                                    numpy.rot90(image_array[:, :,
                                                            current_slice,
                                                            current_volume]))
                            elif ask_rotate_num == 270:
                                data = numpy.rot90(
                                    numpy.rot90(
                                        numpy.rot90(
                                            image_array[:, :, current_slice,
                                                        current_volume])))
                    elif ask_rotate.lower() == 'n':
                        data = image_array[:, :, current_slice, current_volume]

                    #alternate slices and save as png
                    print('Saving image...')
                    image_name = inputfile[:-4] + "_t" + "{:0>3}".format(
                        str(current_volume + 1)) + "_z" + "{:0>3}".format(
                            str(current_slice + 1)) + ".png"
                    imageio.imwrite(image_name, data)
                    print('Saved.')

                    #move images to folder
                    print('Moving files...')
                    src = image_name
                    shutil.move(src, outputfile)
                    slice_counter += 1
                    print('Moved.')

        print('Finished converting images')

    # else if 3D image inputted
    elif len(image_array.shape) == 3:
        # set 3d array dimension values
        nx, ny, nz = image_array.shape

        # set destination folder
        if not os.path.exists(outputfile):
            os.makedirs(outputfile)
            print("Created ouput directory: " + outputfile)

        print('Reading NIfTI file...')

        total_slices = image_array.shape[2]

        slice_counter = 0
        # iterate through slices
        for current_slice in range(0, total_slices):
            # alternate slices
            if (slice_counter % 1) == 0:
                # rotate or no rotate
                if ask_rotate.lower() == 'y':
                    if ask_rotate_num == 90 or ask_rotate_num == 180 or ask_rotate_num == 270:
                        if ask_rotate_num == 90:
                            data = numpy.rot90(image_array[:, :,
                                                           current_slice])
                        elif ask_rotate_num == 180:
                            data = numpy.rot90(
                                numpy.rot90(image_array[:, :, current_slice]))
                        elif ask_rotate_num == 270:
                            data = numpy.rot90(
                                numpy.rot90(
                                    numpy.rot90(image_array[:, :,
                                                            current_slice])))
                elif ask_rotate.lower() == 'n':
                    data = image_array[:, :, current_slice]

                #alternate slices and save as png
                if (slice_counter % 1) == 0:
                    print('Saving image...')
                    image_name = inputfile[:-4] + "_z" + "{:0>3}".format(
                        str(current_slice + 1)) + ".png"

                    imageio.imwrite(image_name, data)
                    print('Saved.')

                    #move images to folder
                    print('Moving image...')
                    src = image_name
                    shutil.move(src, outputfile)
                    slice_counter += 1
                    print('Moved.')

        print('Finished converting images')
    else:
        print('Not a 3D or 4D Image. Please try again.')
コード例 #53
0
                                             lr: learningratevalue
                                         })
        if i % (Epochnum * 100) == 0:
            epochindex = int(i / (Epochnum * 100))
            testaccuracy, outputdata = sess.run([accuracy, back_input],
                                                feed_dict={
                                                    x: x_test,
                                                    y: y_test
                                                })
            costtime = time.time() - currenttime
            print(
                "EPOCHS: %d, train loss:%f, testing accuracy:%f, elapsed time:%f"
                % (epochindex, trainloss, testaccuracy, costtime))
            print("cross_e:%f" % cross_e)
            testf.write(str(epochindex) + '\t' + str(testaccuracy) + '\r\n')
            trainf.write(str(epochindex) + '\t' + str(trainloss) + '\r\n')
            timef.write(str(epochindex) + '\t' + str(costtime) + '\r\n')
            if (epochindex + 1) % 2 == 0:
                print(saveModel(test_path, epochindex))
            # output test image
            outputdata = np.reshape(outputdata, [10, 28, 28])
            resultpath = test_path + "backwardtest_img/"
            os.makedirs(resultpath, exist_ok=True)
            for ind in range(10):
                imageio.imwrite(
                    resultpath + 'test%d_%04d.png' % (ind, testindex),
                    outputdata[ind].astype(np.uint8))
            testindex += 1
            currenttime = time.time()
コード例 #54
0
                cache_lambda1c[0]: batch_lamb1c[0], cache_lambda1c[1]: batch_lamb1c[1],
                } 
                
                FD= {X: bx}
                #start = time.time()
                l1a, l1b, l2a, l2b, l3a, l3b, l4a, l4b  = sess.run([ cache_l1[0], cache_l1[1],  cache_l2[0], cache_l2[1], cache_l3[0], cache_l3[1], cache_l4[0], cache_l4[1]], feed_dict=FD)

                im_demo, _ = get_view(im_4_show, by_b1,  by_b2,  by_b3, by_b4, 0.5, p1, p2, pw, ph, num_grid_cells1, num_grid_cells2, num_anchor_boxes, num_classes)

                group_l1=(l1a,l1b)
                group_l2=(l2a,l2b)
                group_l3=(l3a,l3b)
                group_l4=(l4a,l4b)
                im_demo2, boxes = get_view(im_4_show,  group_l1,  group_l2,  group_l3, group_l4, 0.5, p1, p2, pw, ph, num_grid_cells1, num_grid_cells2,  num_anchor_boxes, num_classes)

                full_list.append(im_demo)
                full_list2.append(im_demo2)
                i+=1


for i in range(len(full_list)):
    file_id = str(i).zfill(6)
    imageio.imwrite(('Images_Export/' + file_id + '.jpg'),full_list2[i])

print("We out here")



コード例 #55
0
    im = imageio.imread(path + '1.jpg')
    F1 = np.array([[1 / 4, 1 / 4], [1 / 4, 1 / 4]])
    F2 = np.array([[1 / 9, 1 / 9, 1 / 9], [1 / 9, 1 / 9, 1 / 9],
                   [1 / 9, 1 / 9, 1 / 9]])
    F3 = np.array([[-1 / 8, -1 / 8, -1 / 8], [-1 / 8, 1, -1 / 8],
                   [-1 / 8, -1 / 8, -1 / 8]])
    F4 = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
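    # F1 and F2 are 2x2 / 3x3 box-blur kernels; F3 and F4 are Laplacian-style
    # edge-detection kernels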
    F = F2  # change this line to choose the kernel
    #im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    #im = getim()  # use a synthetic test image instead
    show(im)
    #opencv
    tcv = cv2.filter2D(im, -1, F)
    print('cv2:')
    #print(tcv)
    show(tcv)
    # my implementation
    way = 2  # change this to choose the border-handling mode (the second is the default)
    t = Convolve(im, F, way)
    #print(t.shape)
    print('my')
    #print(t)
    show(t)
    print('difference')
    show(t - tcv)
    # save the results
    iname = '1_' + str(way) + '.jpg'
    imageio.imwrite(path + iname, t)
    inamecv = '1_' + 'cv.jpg'
    imageio.imwrite(path + inamecv, tcv)
コード例 #56
0
def writeImage(imagePath, image):
    #raw_input("Press Enter to continue...")
    imageio.imwrite(imagePath, image)
コード例 #57
0
t = time.time()
print('save results', flush=True)

# low level plotting
f, ax = plt.subplots(1, 3, sharex=False, sharey=False)
ax[0].imshow(vol_rec[vol_sz[0] // 2, :, :])
ax[1].imshow(vol_rec[:, vol_sz[1] // 2, :])
ax[2].imshow(vol_rec[:, :, vol_sz[2] // 2])
f.tight_layout()

# construct full path for storing the results
recon_path_full = os.path.join(recon_path, 'Walnut{}'.format(walnut_id))

# create the directory in case it doesn't exist yet
if not os.path.exists(recon_path_full):
    os.makedirs(recon_path_full)

# Save every slice in the volume as a separate tiff file
orbit_str = 'pos'
for orbit_id in orbits_to_recon:
    orbit_str = orbit_str + '{}'.format(orbit_id)

for i in range(vol_sz[0]):
    slice_path = os.path.join(
        recon_path_full,
        'nnls_' + orbit_str + '_iter{}_ass{}_vmm{}_{:06}.tiff'.format(
            nb_iter, angluar_sub_sampling, voxel_per_mm, i))
    imageio.imwrite(slice_path, vol_rec[i, ...])

print(np.round(time.time() - t, 3), 'sec elapsed')
コード例 #58
0
ファイル: Shapes.py プロジェクト: stducc/LuNG3D
 def pltImages(self,image,name="plt") :  
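     # tile the 4D batch into a single 2D mosaic image before saving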
     img = np.transpose(image.type(torch.FloatTensor).numpy(), (0, 2, 1, 3))
     mosaic = img.reshape(img.shape[0] * img.shape[1], img.shape[2] * img.shape[3])
     imageio.imwrite(name + ".png", mosaic)
コード例 #59
0
ファイル: compareMethods.py プロジェクト: lilachperry/geomst
def runMethodTest(destPath, opt, contEncPath, styleEncPath, decPath,
                  useConcat):
    if not os.path.exists(destPath):
        os.makedirs(destPath)

    data_loader = CreateDataLoader(opt)
    dataloader = data_loader.load_data()
    dataset_size = len(data_loader)
    print('#testing images = %d' % dataset_size)

    cont_enc = _EncoderNoa(opt.imageSize)
    stl_enc = _EncoderNoa(opt.imageSize)
    dec = _DecoderNoa(opt.imageSize, useConcat)
    cont_enc.load_state_dict(torch.load(contEncPath))
    stl_enc.load_state_dict(torch.load(styleEncPath))
    dec.load_state_dict(torch.load(decPath))

    if opt.cuda:
        # feature_extractor.cuda()
        cont_enc.cuda()
        stl_enc.cuda()
        dec.cuda()

    torch.manual_seed(0)

    for i, data in enumerate(dataloader, 0):

        img1 = data['A']
        img2 = data['B']
        img12 = data['A2']
        img21 = data['B2']

        if opt.cuda:
            img1 = img1.cuda()
            img2 = img2.cuda()
            img12 = img12.cuda()
            img21 = img21.cuda()

        stl1 = stl_enc(img1)
        stl2 = stl_enc(img2)
        cont1 = cont_enc(img1)
        cont2 = cont_enc(img2)
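        # swap codes: combine the style of one image with the content of the other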

        # stl12 = stl_enc(img12)
        # stl21 = stl_enc(img21)
        # cont12 = cont_enc(img12)
        # cont21 = cont_enc(img21)

        if (useConcat):
            stl1cont2 = torch.cat((stl1, cont2), 1)
            stl2cont1 = torch.cat((stl2, cont1), 1)
            # stl1cont1 = torch.cat((stl1, cont1), 1)
            # stl2cont2 = torch.cat((stl2, cont2), 1)
        else:
            stl1cont2 = stl1 + cont2
            stl2cont1 = stl2 + cont1
            # stl1cont1 = stl1 + cont1
            # stl2cont2 = stl2 + cont2

        dec12 = dec(stl1cont2)
        dec21 = dec(stl2cont1)
        # dec11 = dec(stl1cont1)
        # dec22 = dec(stl2cont2)

        if i % 10 == 0:
            im1 = util.tensor2im(img1[0])
            im2 = util.tensor2im(img2[0])
            oim12 = util.tensor2im(img12[0])
            oim21 = util.tensor2im(img21[0])
            im12 = util.tensor2im(dec12[0])
            im21 = util.tensor2im(dec21[0])

            imageio.imwrite(
                os.path.join(destPath, '%d_style_1_cont_2.png' % (i)), im12)
            imageio.imwrite(
                os.path.join(destPath, '%d_style_2_cont_1.png' % (i)), im21)
            imageio.imwrite(
                os.path.join(destPath, '%d_style_1_cont_1_orig.png' % (i)),
                im1)
            imageio.imwrite(
                os.path.join(destPath, '%d_style_2_cont_2_orig.png' % (i)),
                im2)
            imageio.imwrite(
                os.path.join(destPath, '%d_style_1_cont_2_orig.png' % (i)),
                oim12)
            imageio.imwrite(
                os.path.join(destPath, '%d_style_2_cont_1_orig.png' % (i)),
                oim21)
コード例 #60
0
ファイル: visualize.py プロジェクト: imenebak/DENet
def display_instances(strname,
                      image,
                      boxes,
                      masks,
                      class_ids,
                      class_names,
                      scores=None,
                      title="",
                      figsize=(16, 16),
                      ax=None):
    """
    boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
    masks: [height, width, num_instances]
    class_ids: [num_instances]
    class_names: list of class names of the dataset
    scores: (optional) confidence scores for each box
    figsize: (optional) the size of the image.
    """
    # Number of instances
    # mask=np.zeros([masks.shape[0],masks.shape[1]])
    mask = np.zeros([image.shape[0], image.shape[1]])
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]

    if not ax:
        _, ax = plt.subplots(1, figsize=figsize)

    # Generate random colors
    colors = random_colors(N)

    # Show area outside image boundaries.
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(title)

    masked_image = image.astype(np.uint32).copy()
    masked_image1 = masked_image
    # mymask=np.zeros(image.shape[:2],np.uint32)
    number_of_person = np.sum(class_ids == 1)
    jj = 0
    for i in range(N):
        color = colors[i]
        # color = (0,0,0)

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        w = x2 - x1
        h = y2 - y1
        p = patches.Rectangle((x1, y1),
                              x2 - x1,
                              y2 - y1,
                              linewidth=2,
                              alpha=0.7,
                              linestyle="dashed",
                              edgecolor=color,
                              facecolor='none')
        ax.add_patch(p)

        # Label
        class_id = class_ids[i]

        score = scores[i] if scores is not None else None
        label = class_names[class_id]
        x = random.randint(x1, (x1 + x2) // 2)
        caption = "{} {:.3f}".format(label, score) if score else label
        ax.text(x1,
                y1 + 8,
                caption,
                color='w',
                size=11,
                backgroundcolor="none")

        # if class_id==1:
        #     if w > (image.shape[0]/10) or h > (image.shape[1]/10):
        #         if score>0.95:
        #             mask = mask + masks[:, :, i]
        #             masked_image=apply_mask(masked_image, mask, color)
        #             jj=jj+1

        if class_id == 1:
            # if w > (image.shape[0]/10) or h > (image.shape[1]/10):
            #     if score>0.95:
            mask = mask + masks[:, :, i]
            masked_image = apply_mask(masked_image, mask, color)
            jj = jj + 1

        # mask = mask + masks[:, :, i]
        # masked_image = apply_mask(masked_image, mask, color)

        # Mask Polygon
        # Pad to ensure proper polygons for masks that touch image edges.
        padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2),
                               dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = mask
        contours = find_contours(padded_mask, 0.5)

        for verts in contours:
            # Subtract the padding and flip (y, x) to (x, y)
            verts = np.fliplr(verts) - 1
            p = Polygon(verts, facecolor="none", edgecolor=color)
            ax.add_patch(p)
    mymask = mask
    # num=str(number_of_person)
    num = str(jj)
    str1 = strname.replace('.jpg', 'BW.jpg')
    # # str1=aa[0]+'BW'+'.jpg'
    str2 = num + str1
    import imageio
    imageio.imwrite(str1, (np.clip(mymask, 0, 1) * 255).astype(np.uint8))  # save the person mask as 8-bit

    return jj