Example #1
import numpy as np
import gnumpy as gp

# initWeightMatrix, dbn_neuralnet_gpu, and Sigmoid are defined elsewhere in this module.
def buildNet(layerSizes, scales, fanOuts, binary_visible, hidden_activation_function=Sigmoid(), uniforms=None):
    shapes = [(layerSizes[i-1], layerSizes[i]) for i in range(1, len(layerSizes))]
    assert len(scales) == len(shapes) == len(fanOuts)
    if uniforms is None:
        uniforms = [False for s in shapes]
    assert len(scales) == len(uniforms)

    # Randomize the network weights according to the Bottou proposition;
    # borrowed from the ffnet project:
    # http://ffnet.sourceforge.net/_modules/ffnet.html#ffnet.randomweights
    n = 0
    for i in range(len(layerSizes) - 1):
        n += layerSizes[i] * layerSizes[i+1]  # weights feeding layer i+1
        n += layerSizes[i+1]                  # biases of layer i+1

    bound = 2.38 / np.sqrt(n)

    initialWeights = []
    for layer in range(len(shapes)):
        W = np.random.uniform(-bound, bound, size=shapes[layer])
        initialWeights.append(W)

    # Biases start at zero.  Note that the Bottou-initialized weights above are
    # immediately replaced by per-layer initWeightMatrix draws, so only the
    # initWeightMatrix initialization actually takes effect.
    initialBiases = [gp.garray(np.zeros((1, layerSizes[i]))) for i in range(1, len(layerSizes))]
    initialWeights = [gp.garray(initWeightMatrix(shapes[i], scales[i], fanOuts[i], uniforms[i]))
                      for i in range(len(shapes))]

    net = dbn_neuralnet_gpu(initialWeights, initialBiases,
                            hidden_activation_function=hidden_activation_function,
                            binary_visible=binary_visible)
    return net
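
For orientation, here is a minimal call sketch. The layer sizes, scales, and fan-outs are made-up illustrative values, and the behavior of initWeightMatrix for these arguments is an assumption:

# Hypothetical values only: a 784-512-512 net with binary visible units;
# the scales and fanOuts entries are placeholders passed to initWeightMatrix.
layerSizes = [784, 512, 512]
scales = [0.05, 0.05]
fanOuts = [None, None]
net = buildNet(layerSizes, scales, fanOuts, binary_visible=True)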
Example #2
    @classmethod
    def get_gpu_model(cls, cpu_model):
        num_visible = cpu_model.weights.shape[0]
        num_hidden = cpu_model.weights.shape[1]
        rbm = rbm_gpu(num_hidden, num_visible)

        # Copy the parameters to the GPU as float32 garrays.
        rbm.weights = gp.garray(cpu_model.weights.astype(np.float32))
        rbm.hidden_bias = gp.garray(cpu_model.hidden_bias.astype(np.float32))
        rbm.visible_bias = gp.garray(cpu_model.visible_bias.astype(np.float32))

        # The original read hidden_unittype_name for both unit types, which
        # looks like a copy-paste slip; the visible units should use the
        # visible unit-type name.
        rbm.visible_unittype = activation_name_map[cpu_model.visible_unittype_name]
        rbm.hidden_unittype = activation_name_map[cpu_model.hidden_unittype_name]

        return rbm
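
A quick usage sketch, assuming a trained CPU-side model exposing the attributes the method reads (weights, hidden_bias, visible_bias, and the unit-type names); cpu_rbm is a hypothetical name:

# cpu_rbm is a hypothetical trained CPU model; the conversion is one call.
gpu_rbm = rbm_gpu.get_gpu_model(cpu_rbm)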
Example #3
    def train_one_batch(self, batch, l2_reg, learnRate):
        '''Run one CD-1 update on a minibatch and return the mean squared
        reconstruction error per element.'''
        batch_size = batch.shape[0]

        # Move the batch to the GPU if it is not already a garray.
        visible = batch if isinstance(batch, gp.garray) else gp.garray(batch.astype(np.float32))

        # One step of contrastive divergence: gradients for the weights,
        # hidden biases, and visible biases, plus the reconstruction v2.
        gw, gh, gv, v2 = self.cd1(visible, self.weights, self.visible_bias,
                                  self.hidden_bias, self.visible_unittype,
                                  self.hidden_unittype, self.dropout)

        # Momentum-smoothed gradient accumulators.
        self.grad_weights = self.momentum * self.grad_weights + gw
        self.grad_visible = self.momentum * self.grad_visible + gv
        self.grad_hidden = self.momentum * self.grad_hidden + gh

        # L2 weight decay, applied as a multiplicative shrink.
        if l2_reg > 0:
            self.weights *= 1 - l2_reg * learnRate

        self.weights += (learnRate / batch_size) * self.grad_weights
        self.visible_bias += (learnRate / batch_size) * self.grad_visible
        self.hidden_bias += (learnRate / batch_size) * self.grad_hidden

        # We compute squared error even for binary visible unit RBMs.
        return (v2 - visible).euclid_norm()**2 / (batch.shape[0] * batch.shape[1])
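
A minimal training-loop sketch around train_one_batch, assuming an rbm object built as in the other examples; the data and hyperparameters below are placeholders, not values from the source:

import numpy as np

# Placeholder data and hyperparameters for illustration only.
data = np.random.rand(10000, 784).astype(np.float32)
batch_size, learn_rate, l2 = 100, 0.05, 1e-4

for epoch in range(10):
    errors = []
    for start in range(0, data.shape[0], batch_size):
        batch = data[start:start + batch_size]
        errors.append(rbm.train_one_batch(batch, l2_reg=l2, learnRate=learn_rate))
    print 'epoch %d: mean reconstruction error %.6f' % (epoch, sum(errors) / len(errors))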
Example #4
def garrayify(arrays):
    # Ensure every array in the list lives on the GPU as a gnumpy garray.
    return [ar if isinstance(ar, gp.garray) else gp.garray(ar) for ar in arrays]
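
The helper normalizes mixed inputs; a small sketch of its effect, with NumPy inputs assumed:

import numpy as np
import gnumpy as gp

W = np.random.randn(4, 3)        # plain numpy array: copied to the GPU
b = gp.garray(np.zeros((1, 3)))  # already a garray: passes through unchanged
W_gpu, b_gpu = garrayify([W, b])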
Example #5
import os
import sys
import time
import random
import logging

import numpy as np
import gnumpy as gp


class Unbuffered(object):
    '''Proxy a stream and flush after every write.'''
    def __init__(self, stream):
        self.stream = stream
    def write(self, data):
        self.stream.write(data)
        self.stream.flush()
    def __getattr__(self, attr):
        return getattr(self.stream, attr)


sys.stdout = Unbuffered(sys.stdout)  # unbuffered stdout so logging into a file works

# Optionally wait until a GPU board becomes available before starting.
_waitGpu = os.environ.get('DEEPLINK_WAIT_GPU', 'no')

if _waitGpu == 'yes':
    locked_gpu = False
    total_wait_seconds = 0
    while not locked_gpu:
        try:
            # Allocating a tiny garray only succeeds once a board is free.
            gp.garray(np.zeros(1))
            locked_gpu = True
            print 'GPU board became available after waiting %d seconds' % total_wait_seconds
        except Exception:
            locked_gpu = False
            if total_wait_seconds == 0:
                print 'No GPU board is available, waiting...'
            # Sleep roughly ten minutes, jittered so that concurrent jobs
            # do not all re-check at the same moment.
            seconds = 600 + random.randint(-500, 500)
            time.sleep(seconds)
            total_wait_seconds += seconds


# create logger
logger = logging.getLogger('deeplink_logger')
logger.setLevel(logging.DEBUG)

# create console handler and set level to debug