Example #1
    def imread(self, key, filename_fullpath, read_grayscale=False):
        """Read the image into the existing buffer for `key` (in place);
        if that fails, fall back to creating a new Img."""
        FilesDirs.raise_if_file_does_not_exist(filename_fullpath)
        try:
            # In-place read: overwrite the contents of the existing array.
            self.imgs[key][:] = cv2.imread(filename_fullpath)
        except (KeyError, NameError, ValueError):
            # No existing (compatible) buffer -- allocate a new Img instead.
            self.set_img(key, Img(filename_fullpath, read_grayscale=read_grayscale))
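The pattern above writes into an already-allocated array via slice assignment and only allocates when that is impossible, which avoids re-allocating a buffer every time the same key is re-read. Below is a minimal standalone sketch of the same read-in-place-or-allocate idea; it assumes only OpenCV is installed, and the ImageStore class and its load method are hypothetical names, not part of the original code:

import cv2

class ImageStore(object):
    """Toy illustration of the read-in-place-or-allocate pattern."""
    def __init__(self):
        self.imgs = {}

    def load(self, key, path):
        img = cv2.imread(path)           # cv2.imread returns None on failure
        if img is None:
            raise ValueError('cv2.imread failed for {}'.format(path))
        try:
            # Reuse the existing buffer; raises KeyError if the key is new,
            # ValueError if the shapes are incompatible.
            self.imgs[key][:] = img
        except (KeyError, ValueError):
            self.imgs[key] = img.copy()  # allocate a fresh array instead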
Example #2
#!/usr/bin/env python
"""
Created on Tue Feb  4 11:04:35 2014

Author: Oren Freifeld
Email: [email protected]
"""
import os
import inspect

from of.utils import Bunch, FilesDirs

# Absolute path of the directory containing this file.
dirname_of_this_file = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
if len(dirname_of_this_file) == 0:
    raise ValueError('Could not determine the directory of this file.')
print 'dirname_of_this_file', dirname_of_this_file

# The package root is one level up from this file.
dirname = os.path.abspath(os.path.join(dirname_of_this_file, '..'))
FilesDirs.raise_if_dir_does_not_exist(dirname)

dirnames = Bunch()
dirnames.cpa = os.path.join(dirname, 'cpa_files')


if __name__ == "__main__":
    for k in sorted(dirnames.keys()):
        print '{}:\n\t{}'.format(k, dirnames[k])
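For readers without of.utils: Bunch is used above as a dict whose entries can also be read and written as attributes (dirnames.cpa and dirnames['cpa'] name the same entry). The stand-in below is only a sketch of that idiom under that assumption, not the library's actual implementation:

class Bunch(dict):
    """Dict whose keys are also readable/writable as attributes (sketch)."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

dirs = Bunch()
dirs.cpa = '/tmp/cpa_files'   # attribute-style assignment
print(dirs['cpa'])            # dict-style access to the same entry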
    
Example #3
    def getKernel(self):
        tess = self.tess

        filenames = Bunch()
        dname = os.path.dirname(__file__)
        filenames['range_and_dim_are_the_same_shared'] = os.path.join(
            dname, 'transformKernels64_nD_noloop.cu')

        filenames[
            'range_and_dim_are_the_same_only_As_are_shared'] = os.path.join(
                dname, 'transformKernels64_nD_noloop_only_As_are_shared.cu')
        filenames['range_and_dim_are_the_same_noshared'] = os.path.join(
            dname, 'transformKernels64_nD_noloop_noshared.cu')

        filenames['scalar_shared'] = os.path.join(
            dname, 'transformKernels64_nD_scalar_noloop.cu')

        filenames['scalar_only_As_are_shared'] = os.path.join(
            dname, 'transformKernels64_nD_scalar_noloop_only_As_are_shared.cu')

        n = self.dim_domain
        if n not in [1, 2, 3]:
            # No dimension-specific kernel exists; fall back to the generic
            # ('H') variant, which requires dim_domain == dim_range.
            if self.dim_domain != self.dim_range:
                raise NotImplementedError
            n = 'H'
            n1 = n
            n2 = n
        else:
            n1 = self.dim_domain
            n2 = self.dim_range

        s = '_'.join([str(n)] * 2)
        filenames[s] = os.path.join(dname, 'calc_{0}Dto{1}D.cu'.format(n1, n2))

        if self.sharedmemory == 0:
            filenames[s] = filenames[s].replace('.cu', '_no_shared.cu')
        elif self.sharedmemory == 1:
            filenames[s] = filenames[s].replace('.cu',
                                                '_only_As_are_shared.cu')
        elif self.sharedmemory == 2:
            pass
        else:
            raise ValueError(self.sharedmemory)

#        if n==2:
#            if ((computer.has_good_gpu_card and 320<=self.nCells)
#                or
#                (320 <=self.nCells)
#                ):
#                filenames[s]=filenames[s].replace('.cu','_only_As_are_shared.cu')
#
#        elif n == 3:
#            if ((computer.has_good_gpu_card and 320<=self.nCells)
#                or
#                (320 <=self.nCells)
#                ):
#                filenames[s]=filenames[s].replace('.cu','_only_As_are_shared.cu')
#        if n>3:
#            filenames[s]=filenames[s].replace('.cu','_no_shared.cu')
#
#
        if 'calc_transformation_gpu' not in self.__dict__:
            k = str(n1) + '_' + str(n2)
            filename = filenames[k]

            self.kernel_filename = filename
            # The kernel file may live on a (possibly flaky) network drive,
            # so retry briefly before giving up.
            nAttempts = 2
            for i in range(nAttempts):
                try:
                    FilesDirs.raise_if_file_does_not_exist(filename)
                    break
                except FileDoesNotExistError:
                    print "Attempt {} out of {}".format(i + 1, nAttempts)
                    print "Couldn't find {0}.\nMaybe the network is (temporarily?) down...".format(
                        filename)
                    print "Let me sleep over it for a second before I try again"
                    time.sleep(1)
            else:  # We never broke out of the loop: re-raise the last error.
                raise

            with open(filename, 'r') as content_file:
                kernel = content_file.read()
            # We ran into numerical problems with 32bit...
            # Had to switch to 64
            if self.my_dtype == np.float32:
                kernel = re.sub("double", "float", kernel)
            # Define the number of cells here dynamically.
            addition = (
                '#define N_CELLS {}\n'.format(self.nCells) +
                '#define DIM {}\n'.format(self.dim_domain) +
                '#define TESS_TYPE {}'.format(2 - ['II', 'I'].index(tess)) +
                ' // 2 is II; 1 is I\n\n')

            kernel = addition + kernel
            #            print kernel
            self.kernel = kernel

            print "kernel_filename"
            print self.kernel_filename

            try:
                mod = comp.SourceModule(kernel, include_dirs=include_dirs)
            except:
                # Note: the shared-memory-free fallback below is currently
                # disabled by this re-raise, so compilation errors propagate.
                raise
                print '-' * 60
                print 'comp.SourceModule(kernel) failed!'
                print 'trying without shared memory. The code might run slower.'
                print '-' * 60
                mod = comp.SourceModule(kernel.replace('__shared__', ''))
#                ipshell('comp.SourceModule(kernel) failed!')
#                raise
            if self.dim_domain == self.dim_range:

                # At some point the line below was commented out. Not sure why
                try:
                    self.calc_T_simple_gpu = mod.get_function('calc_T_simple')
                except:
                    pass

                self.calc_T_gpu = mod.get_function('calc_T')
                self.calc_trajectory_gpu = mod.get_function('calc_trajectory')
                self.calc_v_gpu = mod.get_function('calc_v')
                self.calc_cell_idx_gpu = mod.get_function('calc_cell_idx')

#                self.calc_grad_theta_gpu = mod.get_function('calc_grad_theta')
            elif self.dim_range == 1:
                self.calc_v_gpu_scalar = mod.get_function('calc_v_scalar')
                self.calc_T_gpu_scalar = mod.get_function('calc_T_scalar')
            else:
                raise NotImplementedError(self.dim_domain, self.dim_range)
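The central technique in this example is preprocessing the CUDA source before compilation: #define lines for the number of cells, the dimension, and the tessellation type are prepended to the kernel text, and double is rewritten to float with a regex when a 32-bit dtype is requested, before the string is handed to PyCUDA's SourceModule. Below is a minimal sketch of that pattern; it assumes a CUDA-capable machine with PyCUDA installed, and the scale kernel and the N_CELLS value are illustrative only, not the kernels used above:

import re
import numpy as np
import pycuda.autoinit                  # creates a CUDA context
import pycuda.compiler as comp
import pycuda.gpuarray as gpuarray

kernel_src = """
__global__ void scale(double *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= N_CELLS;         // N_CELLS comes from the prepended #define
}
"""

my_dtype = np.float32
if my_dtype == np.float32:
    # The kernel is written for double; rewrite it for 32-bit floats.
    kernel_src = re.sub("double", "float", kernel_src)

# Fix compile-time constants by prepending #define lines.
kernel_src = '#define N_CELLS {}\n'.format(16) + kernel_src

mod = comp.SourceModule(kernel_src)
scale = mod.get_function('scale')

x = gpuarray.to_gpu(np.arange(8, dtype=my_dtype))
scale(x.gpudata, np.int32(x.size), block=(64, 1, 1), grid=(1, 1))
print(x.get())                          # every entry multiplied by N_CELLS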