Example #1
 def forward(self, inTensors: list, outTensors: list):
     # store copies of the incoming values for backpropagation
     self.invalues = []
     for t in inTensors:
         ten = Tensor(t.elements, t.shape)
         self.invalues.append(ten)
     # apply the tanh activation element-wise
     for i in range(len(inTensors)):
         y = np.tanh(inTensors[i].elements)
         outTensors[i] = Tensor(y, np.shape(y))
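These snippets rely on a Tensor container that is not shown here. A minimal sketch of what such a class presumably looks like, assuming it only stores an elements array, a shape tuple, and a deltas buffer for backpropagation:

import numpy as np

class Tensor:
    # hypothetical minimal container assumed by the examples on this page
    def __init__(self, elements, shape, deltas=None):
        self.elements = np.asarray(elements)    # stored values (often kept as a flat array)
        self.shape = shape                      # logical shape of the data
        self.deltas = deltas if deltas is not None else np.zeros(np.size(self.elements))   # gradient buffer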
Example #2
 def forward(self, inTensors: list, outTensors: list):
     # store copies of the incoming values for backpropagation
     self.invalues = []
     for t in inTensors:
         tensor = Tensor(t.elements, t.shape)
         self.invalues.append(tensor)
     # apply the sigmoid activation element-wise
     for i in range(len(inTensors)):
         x = np.reshape(inTensors[i].elements, inTensors[i].shape)
         y = self.sigm(x)
         outTensors[i] = Tensor(y, y.shape)
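self.sigm is not part of the snippet; a minimal sketch of what it presumably computes (the element-wise logistic function), assuming a NumPy array input:

import numpy as np

def sigm(self, x):
    # assumed helper: element-wise logistic sigmoid
    return 1.0 / (1.0 + np.exp(-x))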
Example #3
 def forward(self, inTensors: list, outTensors: list): 
     self.inshape = inTensors[0].shape   
     self.invalues_fw = [] 
     for t in inTensors: 
         ten = Tensor(t.elements,t.shape)
         self.invalues_fw.append(ten)
     
     for i in range(len(inTensors)): 
         if self.filter_depth != inTensors[i].shape[0]:
             raise ValueError("depth of the input tensor must match the layer's filter depth")
         
         if inTensors[i].shape[0] == 1: 
             #if depth is 1 (== just 1 filter): skip depth and reshape to (x,y)
             whole_image = inTensors[i].elements.reshape(inTensors[i].shape[1],inTensors[i].shape[2])
         else:
             whole_image = inTensors[i].elements.reshape(inTensors[i].shape)
             
         count = 0 
         tmp = []
         arrs = []
         
         #get the respective image convoluted with the respective filter 
         #img_ch1 *conv f1_ch1
         #img_ch2 *conv f1_ch2
         #img_ch1 *conv f2_ch1
         #img_ch2 *conv f2_ch2
         for x in range(self.filter_count): 
             for k in range(self.filter_depth):
                 if whole_image.ndim > 2:
                     res = self.convolve_2d(whole_image[k],self.kernel_weights[count],self.bias[x]) 
                 else:
                     res = self.convolve_2d(whole_image,self.kernel_weights[count],self.bias[x])
                 tmp.append(res)
                 count += 1 
         
         #sum the per-channel results for each filter
         for x in range(self.filter_count):
             start = x * self.filter_depth
             arrs.append(np.sum(tmp[start:start + self.filter_depth], axis=0))
         
         stacked_arrays = np.stack(arrs)
         stacked_arrays = np.clip(stacked_arrays, -1e7, 1e7)   #clip symmetrically to avoid overflow without destroying negative values
         
         outTensor = Tensor(stacked_arrays.flatten(), stacked_arrays.shape)
         outTensor.deltas = np.zeros((len(stacked_arrays.flatten())))
         outTensors[i] = outTensor
     self.outshape = outTensors[0].shape
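self.convolve_2d is also not shown. A minimal sketch under the assumption that it performs a 'valid' 2-D cross-correlation of one channel with one kernel and adds a scalar bias:

import numpy as np

def convolve_2d(self, image, kernel, bias):
    # assumed helper: 'valid' 2-D cross-correlation plus a scalar bias
    kx, ky = kernel.shape
    out = np.zeros((image.shape[0] - kx + 1, image.shape[1] - ky + 1))
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            out[i, j] = np.sum(image[i:i + kx, j:j + ky] * kernel) + bias
    return out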
Example #4
 def forward(self, inTensors: list, outTensors: list):
     if self.invalues_fw is None:
         self.invalues_fw = [t for t in inTensors]   #cache the inputs for backpropagation; a shallow copy is sufficient here
     for i in range(len(inTensors)):
         x = np.reshape(inTensors[i].elements, inTensors[i].shape)
         y = x @ self.weights + self.bias
         outTensors[i] = Tensor(y, y.shape)
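For the matrix product above to be well-defined, self.weights must have shape (n_in, n_out) and self.bias shape (n_out,); a small shape check with hypothetical sizes:

import numpy as np

n_in, n_out = 4, 3                      # hypothetical layer sizes
x = np.random.rand(n_in)                # flattened input of one sample
weights = np.random.rand(n_in, n_out)
bias = np.random.rand(n_out)
y = x @ weights + bias
print(y.shape)                          # (3,)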
Example #5
 def backward(self, outTensors: list, inTensors: list, update, quickProp=False):
     self.invalues_bw = []
     for t in inTensors:     #copy explicitly; otherwise the list would only hold references to the incoming tensors
         tensor = Tensor(t.elements, (np.size(t.elements),1), t.deltas)
         self.invalues_bw.append(tensor)
     for i in range(len(outTensors)):
         outTensors[i].elements = np.ones(np.shape(self.invalues_fw[0].elements))  #placeholder carrying the input dimensions; the values themselves are not needed
         outTensors[i].deltas = inTensors[i].deltas @ self.weights.transpose()     #propagate the deltas back through the weight matrix
     if update: self.param_update(quickProp)
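The delta propagation inTensors[i].deltas @ self.weights.transpose() maps gradients from the layer output back to the layer input; a quick shape check with the same hypothetical sizes as above:

import numpy as np

n_in, n_out = 4, 3
weights = np.random.rand(n_in, n_out)
delta_out = np.random.rand(n_out)            # deltas arriving from the following layer
delta_in = delta_out @ weights.transpose()
print(delta_in.shape)                        # (4,) -- matches the layer's input size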
Example #6
 def forward(self, inTensors: list, outTensors: list):
     # store copies of the incoming values for backpropagation
     self.invalues = []
     for t in inTensors:
         ten = Tensor(t.elements, t.shape)
         self.invalues.append(ten)
     # apply ReLU element-wise: keep non-negative values, zero out the rest
     for i in range(len(inTensors)):
         for j in range(len(inTensors[i].elements)):
             if inTensors[i].elements[j] >= 0:
                 outTensors[i].elements[j] = inTensors[i].elements[j]
             else:
                 outTensors[i].elements[j] = 0
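The element-wise loops above can also be written as a single vectorized call; a sketch of a drop-in replacement for the second loop, assuming outTensors[i] may simply be rebuilt instead of being written entry by entry:

import numpy as np

# assuming the hypothetical Tensor sketch shown after Example #1
for i in range(len(inTensors)):
    y = np.maximum(inTensors[i].elements, 0)   # ReLU: zero out negative values
    outTensors[i] = Tensor(y, np.shape(y))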
Example #7
    def backward(self, outTensors: list, inTensors: list, update): 
        self.outvalues_bw = []
        self.invalues_bw = [] 
        for t in inTensors: 
            ten = Tensor(t.elements, t.shape, t.deltas)
            self.invalues_bw.append(ten)
        
        #transpose kernel so that [x,y,depth,#filters] becomes [x,y,#filters,depth]
        self.flip_kernel()
        
        #rotate kernel
        self.rotate_kernel_180()
        
        #the deltas of the outTensors are the deltas of the inTensor reverse-convoluted with the rotated kernel 
        for i in range(len(inTensors)): 
            
            deltas = inTensors[i].deltas        #some large array, like 12x1 (coming from a 3x4 conv. with [2,2,2,2] kernel)
            delta_shape = (inTensors[i].shape[1],inTensors[i].shape[2])
            

            #map respective outputs to inputs, e.g. 18x1 becomes 2x (2x3) 
            respective_deltas = []          
            curr = 0
            for l in range(1,self.filter_depth+1):
                if self.filter_depth == 1: 
                    respective_deltas.append(deltas.reshape(delta_shape))
                    break
                stopper = int(l * len(deltas) / self.filter_count)
                respective_deltas.append(deltas[curr:stopper].reshape(delta_shape))
                curr = stopper
            
            #pad the deltas to make them ready for convolution that returns inputshaped array: 
            for l in range(len(respective_deltas)):
                px = int(np.floor(self.filtersize_x / 2))
                py = int(np.floor(self.filtersize_y / 2))
                respective_deltas[l] = np.pad(respective_deltas[l], (px, py), mode='constant', constant_values=(0, 0))
                
                
            # reconvolute: returns #filtercount times  delta-arrays with shape: inshape
            # i.e. here: 2x 4x4 
            
            ## - same routine as in forward -> own method? 
            
            count = 0 
            tmp = []
            arrs = []
            
            #get the respective image convoluted with the respective filter 
            #img_ch1 *conv f1_ch1
            #img_ch2 *conv f1_ch2
            #img_ch1 *conv f2_ch1
            #img_ch2 *conv f2_ch2
            for x in range(self.filter_count): 
                for k in range(self.filter_depth):
                    res = self.convolve_2d(respective_deltas[k],self.kernel_weights[count],self.bias[x]) 
                    tmp.append(res)
                    count += 1 
            
            #sum up the results 
            count = 0
            for x in range(self.filter_depth): 
                arrs.append(tmp[count] + tmp[count+1])
                count += self.filter_count      #TODO: this was 2, is it now really universally applicable? 
            
            
            stacked = np.stack(arrs)
            stacked = np.clip(stacked, -1e5, 1e5)   #clip symmetrically to avoid overflow without destroying negative values
            
            ten = Tensor(np.ones(len(stacked.flatten())), stacked.shape)         #dummy elements of ones; only the shape is needed here
            ten.deltas = stacked.flatten()
            if DEBUG: print("delta sum: {}".format(np.sum(ten.deltas)))
            
            outTensors[i] = ten
            
            self.outvalues_bw.append(stacked)   #keep the deltas in cache for weight update
            
            # ------------ finished processing this inTensor
            
        #redo kernel transpose and rotate so that the layer can be used 
        #for the next forward pass again 
        self.flip_kernel()
        self.rotate_kernel_180()
        
        
        if update: self.param_update()
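flip_kernel and rotate_kernel_180 are not shown either. A minimal sketch of what the rotation step presumably does, assuming self.kernel_weights is an iterable of 2-D NumPy kernels:

import numpy as np

def rotate_kernel_180(self):
    # assumed helper: rotate every 2-D kernel by 180 degrees (flip both axes)
    for i in range(len(self.kernel_weights)):
        self.kernel_weights[i] = np.rot90(self.kernel_weights[i], 2)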
Example #8
 def forward(self, rawData: list) -> list:
     # wrap each raw data sample in a Tensor so the following layers can process it
     tensors = []
     for data in rawData:
         tensors.append(Tensor(data, np.shape(data)))
     return tensors
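A hypothetical call of this input layer, assuming InputLayer is the (not shown) class this forward() belongs to:

import numpy as np

input_layer = InputLayer()                       # hypothetical instance of the class defining this forward()
raw = [np.array([0.1, 0.2, 0.3]), np.array([0.4, 0.5])]
tensors = input_layer.forward(raw)
print(tensors[0].shape)                          # (3,)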