Example #1
0
    def __init__(self, num_hidden, num_visible, data=None, binary_visible=True,
                 hidden_activation_function=None, dropout=0.0):
        '''Initialize a restricted Boltzmann machine on GPU.

        num_hidden: The number of hidden units.
        num_visible: The number of visible units; overridden by data.shape[1]
          when `data` is supplied.
        data: Optional training data; if given, num_visible is taken from its
          second dimension (one column per visible unit).
        binary_visible: True if the visible units are binary (they then reuse
          the hidden activation), False if they are normally distributed.
        hidden_activation_function: Activation for the hidden units; defaults
          to Sigmoid().  The default is None so each call gets a fresh
          instance instead of one shared mutable default object.
        dropout: Hidden-unit dropout rate; 0 or None selects plain CD1,
          anything else selects the dropout variant.
        '''
        self.num_hidden = num_hidden
        self.num_visible = num_visible

        self.hidden_unittype = (Sigmoid() if hidden_activation_function is None
                                else hidden_activation_function)
        self.visible_unittype = self.hidden_unittype if binary_visible else Gaussian()

        if data is not None:
            self.data = data
            self.num_visible = data.shape[1]

        # Small random initial weights; hidden biases start at -4 (a common
        # choice to encourage sparse hidden activations).
        self.weights = 0.1 * gp.randn(self.num_visible, self.num_hidden)
        self.hidden_bias = -4. * gp.ones(self.num_hidden)
        self.visible_bias = gp.zeros(self.num_visible)

        # Gradient accumulators matching each parameter's shape.
        self.grad_weights = gp.zeros(self.weights.shape)
        self.grad_visible = gp.zeros(self.visible_bias.shape)
        self.grad_hidden = gp.zeros(self.hidden_bias.shape)

        self.dropout = dropout
        # Pick the contrastive-divergence routine once, up front.
        self.cd1 = CD1 if not dropout else CD1_dropout
Example #2
0
 def __init__(self, data, label=None, batch_size=128, randomize=False, gpu_buffer_size_MB=512):
     '''Prepare a minibatch source over `data` (and optional `label`),
     (re)allocating a shared module-level GPU buffer when needed.

     data: 2-D array of samples, one row per sample.
     label: Optional 2-D label array aligned row-for-row with `data`.
     batch_size: Minibatch size; the buffered row count is aligned to it.
     randomize: Whether minibatches are to be drawn in random order.
     gpu_buffer_size_MB: Size budget for the shared GPU data buffer.
     '''
     # float32 takes 4 bytes, so the budget in elements is bytes / 4.
     nsize = int(gpu_buffer_size_MB * 1024 * 1024 / 4)

     self.data = data
     self.label = label
     self.batch_size = batch_size
     self.randomize = randomize

     # When labels are present they occupy one extra column of the budget.
     columns = self.data.shape[1] if self.label is None else 1 + self.data.shape[1]

     # Maximum number of rows the buffer can hold, rounded down to a whole
     # number of minibatches.  NOTE(review): this becomes 0 when the budget
     # holds fewer than `batch_size` rows — confirm callers never hit that.
     self.num_rows = int(nsize / columns)
     self.num_rows = int(self.num_rows / self.batch_size) * self.batch_size

     # Allocate the shared GPU buffer only once; grow it when a larger
     # request arrives, otherwise reuse the existing allocation.
     if gpu_buffer._gpu_buffer_size < self.num_rows * self.data.shape[1]:
         # Parenthesized print works under both Python 2 and Python 3.
         print("requesting buffer with size of %d float32" % (self.num_rows * self.data.shape[1]))
         gpu_buffer._gpu_buffer = gp.zeros((self.num_rows, self.data.shape[1]))
         gpu_buffer._gpu_buffer_size = self.num_rows * self.data.shape[1]

     # The label buffer, unlike the data buffer, is reallocated every time
     # labels are supplied; no cached size is tracked for it.
     if self.label is not None:
         gpu_buffer._gpu_label_buffer = gp.zeros((self.num_rows, self.label.shape[1]))
Example #3
0
 def __init__(self, initialWeights, initialBiases, hidden_activation_function=None, binary_visible=False):
     '''Build a network from pre-trained per-layer weights and biases.

     initialWeights: Sequence of weight matrices, one per layer.
     initialBiases: Sequence of bias vectors, one per layer.
     hidden_activation_function: Activation applied by every layer; defaults
       to Sigmoid().  The default is None so each call gets a fresh instance
       instead of one shared mutable default object.
     binary_visible: Whether the visible (input) units are binary.
     '''
     num_layers = len(initialWeights)

     self.binary_visible = binary_visible
     self.learn_rate_decay_half_life = 100
     self.apply_L2cost_after = 100
     self.weights = garrayify(initialWeights)
     self.biases = garrayify(initialBiases)

     if hidden_activation_function is None:
         hidden_activation_function = Sigmoid()
     # NOTE: all layers share the same activation-function instance.
     self.hidden_activation_functions = [hidden_activation_function for i in range(num_layers)]

     # Gradient accumulators; mutated in place by backward_propagate.
     self.grad_weights = [gp.zeros(self.weights[i].shape) for i in range(num_layers)]
     self.grad_bias = [gp.zeros(self.biases[i].shape) for i in range(num_layers)]