Example #1
 def val_iter(self, count, recorder):
     
     '''Run one validation iteration using the compiled val_iter_fn.'''
         
     # on the first sub-batch of the first file, shuffle the validation set
     # and shard it across the MPI ranks
     if self.current_v == 0 and self.subb_v == 0:
         self.data.shuffle_data(mode='val')
         self.data.shard_data(mode='val', rank=self.rank, size=self.size)
         
     img = self.data.val_img_shard
     labels = self.data.val_labels_shard

     # image mean, unused here since mean subtraction is commented out below
     img_mean = self.data.rawdata[4]
     mode = 'val'
     function = self.val_iter_fn
     
     if self.subb_v == 0:  # on sub-batch 0 of each file, load the whole file into shared_x

         arr = img[self.current_v]  # - img_mean  (mean subtraction disabled)

         # move axis 0 to the end to match the input layout the model expects
         # (numpy assumed imported as np at module level)
         arr = np.rollaxis(arr, 0, 4)

         self.shared_x.set_value(arr)
         self.shared_y.set_value(labels[self.current_v])
     
     
         # flag whether this is the last file of the validation shard
         self.last_one_v = (self.current_v == self.data.n_batch_val - 1)
     
     # switch off the stochastic layers (dropout, random cropping) so validation is deterministic
     from theanompi.models.layers2 import Dropout, Crop
     Dropout.SetDropoutOff()
     Crop.SetRandCropOff()
     cost, error, error_top5 = function(self.subb_v)
     Dropout.SetDropoutOn()
     Crop.SetRandCropOn()
     
     recorder.val_error(count, cost, error, error_top5)
     
     if (self.subb_v + 1) // self.n_subb == 1:  # the last sub-batch of this file has been processed

         # move on to the next file, wrapping around after the last one
         if not self.last_one_v:
             self.current_v += 1
         else:
             self.current_v = 0

         self.subb_v = 0
     else:
         self.subb_v += 1
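
Both examples call a val_iter_fn that was compiled elsewhere and takes only a sub-batch index, because the whole file already sits in shared_x/shared_y on the device. The sketch below shows, under assumptions, how such a function could be compiled with Theano's givens mechanism so that each call reads one sub-batch slice of the shared buffers; the batch size, tensor layout, and the cost/error/error_top5 expressions are illustrative placeholders rather than theanompi's actual model code.

import numpy as np
import theano
import theano.tensor as T

batch_size = 256              # samples per sub-batch (assumed)
n_subb = 4                    # sub-batches per file (assumed)

x = T.tensor4('x')            # model input; batch on the last axis here (assumed layout)
y = T.ivector('y')            # integer labels

# device buffers holding one whole file (n_subb sub-batches) at a time
shared_x = theano.shared(np.zeros((3, 32, 32, batch_size * n_subb),
                                  dtype=theano.config.floatX))
shared_y = theano.shared(np.zeros((batch_size * n_subb,), dtype='int32'))

# stand-ins for the real model's cost/error graphs built on x and y
cost = T.mean((x.sum(axis=(0, 1, 2)) - y) ** 2)
error = T.mean(T.neq(y, 0))
error_top5 = T.mean(T.neq(y, 1))

subb = T.iscalar('subb')      # sub-batch index: the only runtime argument
val_iter_fn = theano.function(
    [subb], [cost, error, error_top5],
    givens={x: shared_x[:, :, :, subb * batch_size:(subb + 1) * batch_size],
            y: shared_y[subb * batch_size:(subb + 1) * batch_size]})

# usage mirrors the loop above: copy a whole file into shared_x/shared_y once,
# then call val_iter_fn(0), val_iter_fn(1), ... for its sub-batches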
Example #2
 def val_iter(self, count, recorder):

     '''Run one validation iteration using the compiled val_iter_fn.'''
     
     # on the first file, shard the validation set across the MPI ranks
     # (file_batch_size is a constant defined elsewhere in the original module)
     if self.current_v == 0:
         self.data.shard_data(file_batch_size, self.rank, self.size)
     
     img = self.data.val_img_shard
     labels = self.data.val_labels_shard

     mode = 'val'
     function = self.val_iter_fn
     
     if self.subb_v == 0: # load the whole file into shared_x when loading sub-batch 0 of each file.
     
         # parallel loading of shared_x through the parallel loader process
         if self.data.para_load:

             icomm = self.data.icomm

             if self.current_v == 0:

                 # 3.0 send the mode signal so the loader switches between train and val loading
                 icomm.isend('val', dest=0, tag=40)
                 # 3.1 send the load signal for the very first file
                 icomm.isend(img[self.current_v], dest=0, tag=40)
             
             
             if self.current_v == self.data.n_batch_val - 1:

                 self.last_one_v = True
                 # resend the current file, only to receive the final copy_finished signal from the loader
                 icomm.isend(img[self.current_v], dest=0, tag=40)

             else:

                 self.last_one_v = False
                 # 4. send the preload signal so the loader starts on the next file
                 icomm.isend(img[self.current_v + 1], dest=0, tag=40)
                 
             
             # 5. block until the loader reports that the batch has been copied into shared_x
             msg = icomm.recv(source=0, tag=55)
             assert msg == 'copy_finished'
             
     
         else:

             # serial loading: read the hickle file for the current batch directly
             # (hickle assumed imported as hkl at module level)
             arr = hkl.load(img[self.current_v])  # - img_mean  (mean subtraction disabled)

             # arr = np.rollaxis(arr,0,4)

             self.shared_x.set_value(arr)
             
             
         # direct loading of shared_y    
         self.shared_y.set_value(labels[self.current_v])
     
     
         # flag whether this is the last file of the validation shard
         self.last_one_v = (self.current_v == self.data.n_batch_val - 1)
             
     # switch off the stochastic layers (dropout, random cropping) so validation is deterministic
     from theanompi.models.layers2 import Dropout, Crop
     Dropout.SetDropoutOff()
     Crop.SetRandCropOff()
     cost, error, error_top5 = function(self.subb_v)
     Dropout.SetDropoutOn()
     Crop.SetRandCropOn()
     
     recorder.val_error(count, cost, error, error_top5)
     
     if (self.subb_v + 1) // self.n_subb == 1:  # the last sub-batch of this file has been processed

         # move on to the next file, wrapping around after the last one
         if not self.last_one_v:
             self.current_v += 1
         else:
             self.current_v = 0

         self.subb_v = 0
     else:
         self.subb_v += 1
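
The tag-40/tag-55 handshake in this version assumes a separate loading process on the other end of icomm. A rough sketch of what that counterpart loop could look like with mpi4py and hickle follows; loader_loop, load_to_shared, and worker_rank are hypothetical names, the message-type check is an assumption, and preloading/double buffering and termination handling are omitted for brevity.

import hickle as hkl

def loader_loop(icomm, load_to_shared, worker_rank=0):
    '''Hypothetical counterpart of the icomm handshake in val_iter above.'''
    mode = 'train'
    while True:
        # a tag-40 message is either a mode signal ('train'/'val') or a file name to load
        msg = icomm.recv(source=worker_rank, tag=40)
        if msg in ('train', 'val'):
            mode = msg                        # 3.0: switch loading mode
            continue
        arr = hkl.load(msg)                   # read the requested hickle file from disk
        load_to_shared(arr, mode)             # copy it into the buffer backing shared_x
        # 5. tell the worker that shared_x now holds the requested batch
        icomm.send('copy_finished', dest=worker_rank, tag=55)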