def __init__(self, dim1, dim2):
    """Constructor

    @param dim1 -- number of neurons in each row of the layer.
    @param dim2 -- number of neurons in each column of the layer.
    """
    # activations and deltas are zero-filled device matrices of the same shape
    self.activations = cp.get_filled_matrix(dim1, dim2, 0.0)
    self.deltas      = cp.get_filled_matrix(dim1, dim2, 0.0)
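A minimal construction sketch; the class name NeuronLayer is an assumption (the enclosing class is not shown above), and dim2 appears to play the role of the minibatch size, since weight_update below reads the batch size from activations.w:

    # hypothetical usage; NeuronLayer is an assumed class name
    layer = NeuronLayer(256, 64)   # 256 neurons, minibatch of 64
    # both layer.activations and layer.deltas start out zero-filled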
def __init__(self, source_layer, target_layer):
    """Constructor

    @param source_layer reference to the previous neuron layer.
    @param target_layer reference to the next neuron layer.
    """
    self.source = source_layer
    self.target = target_layer
    dim1 = self.target.activations.h   # number of target neurons
    dim2 = self.source.activations.h   # number of source neurons
    # initialize the weights uniformly in [-0.05, 0.05):
    # draw from [0, 1), shift by -0.5, then scale by 1/10
    self.weight = cp.get_filled_matrix(dim1, dim2, 0.0)
    cp.fill_rnd_uniform(self.weight)
    cp.apply_scalar_functor(self.weight, cp.scalar_functor.SUBTRACT, 0.5)
    cp.apply_scalar_functor(self.weight, cp.scalar_functor.DIV, 10)
    self.bias = cp.get_filled_matrix(dim1, 1, 0)
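A wiring sketch under the same assumption about the class names: a 784-unit source layer feeding a 128-unit target layer yields a 128 x 784 weight matrix and a 128 x 1 bias column:

    # hypothetical wiring; NeuronLayer and WeightLayer are assumed class names
    src = NeuronLayer(784, 64)   # e.g. flattened 28x28 inputs, batch of 64
    tgt = NeuronLayer(128, 64)   # hidden layer
    con = WeightLayer(src, tgt)  # con.weight is 128 x 784, con.bias is 128 x 1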
def weight_update(self, learnrate=0.01, decay=0.0):
    """Updates the weights and the bias using source activations and target deltas.

    @param learnrate how strongly the gradient influences the weights
    @param decay     large values result in a stronger regularization with
                     respect to the squared weight values
    """
    batch_size = self.source.activations.w   # columns are the minibatch samples
    # weight gradient: target deltas times transposed source activations
    h = cp.dev_matrix_cmf(self.weight.h, self.weight.w)
    cp.prod(h, self.target.deltas, self.source.activations, 'n', 't')
    cp.learn_step_weight_decay(self.weight, h, learnrate / batch_size, decay)
    h.dealloc()
    # bias gradient: row-wise sum of the target deltas
    h = cp.get_filled_matrix(self.target.activations.h, 1, 0)
    cp.reduce_to_col(h.vec, self.target.deltas)
    cp.learn_step_weight_decay(self.bias, h, learnrate / batch_size, decay)
    h.dealloc()
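As a cross-check of what this step computes, here is a rough NumPy sketch of the same update; it assumes (not confirmed above) that learn_step_weight_decay(W, dW, lr, decay) performs W += lr * (dW - decay * W):

    import numpy as np

    def weight_update_np(W, b, src_act, tgt_delta, learnrate=0.01, decay=0.0):
        # columns of the activation matrices are the samples of the minibatch
        batch_size = src_act.shape[1]
        dW = tgt_delta @ src_act.T                  # cp.prod(..., 'n', 't')
        W += (learnrate / batch_size) * (dW - decay * W)
        db = tgt_delta.sum(axis=1, keepdims=True)   # cp.reduce_to_col
        b += (learnrate / batch_size) * (db - decay * b)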
def __init__(self, source_layer, target_layer):
    """Constructor

    @param source_layer reference to previous neuron layer.
    @param target_layer reference to next neuron layer.
    """
    self.source = source_layer
    self.target = target_layer
    dim1 = self.target.activations.shape[0]   # number of target neurons
    dim2 = self.source.activations.shape[0]   # number of source neurons
    # initialize the weights uniformly in [-0.05, 0.05)
    self.weight = cp.get_filled_matrix(dim1, dim2, 0.0)
    cp.fill_rnd_uniform(self.weight)
    self.weight -= 0.5
    self.weight /= 10.0
    self.bias = cp.dev_tensor_float(dim1)
    cp.fill(self.bias, 0)
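This second constructor does the same job but appears to target cuv's tensor interface: shapes come from .shape[0] instead of .h, the shift and scale use overloaded operators instead of apply_scalar_functor, and the bias is a one-dimensional dev_tensor_float. A minimal sketch of the resulting weight range, in plain NumPy for illustration (the cuv calls above are assumed to behave analogously):

    import numpy as np

    # the same initialization recipe, spelled out in NumPy:
    W = np.random.uniform(0.0, 1.0, size=(128, 784))  # uniform in [0, 1)
    W -= 0.5                                          # center: [-0.5, 0.5)
    W /= 10.0                                         # scale:  [-0.05, 0.05)
    assert W.min() >= -0.05 and W.max() < 0.05

Small symmetric initial weights of this kind break the symmetry between units while keeping initial pre-activations close to zero.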