def _init_weights(self, weight_shape, bias_shape):
    """Fill in default init scales and lazily materialize GPU parameter arrays.

    Weights are drawn as Gaussian noise scaled by ``initW``; biases start as a
    constant ``initB``.  Existing weights (e.g. restored from a checkpoint,
    so ``wt`` is already set) are left untouched.
    """
    # Default init scales if the caller supplied None.
    if self.initB is None:
        self.initB = 0.0
    if self.initW is None:
        # Scale inversely with sqrt of fan-in so activations stay bounded.
        self.initW = 1.0 / np.sqrt(np.prod(weight_shape))

    # Record shapes on both parameter holders.
    self.weight.shape = weight_shape
    self.bias.shape = bias_shape

    # Only allocate fresh parameters when none exist yet.
    if self.weight.wt is None:
        host_w = col_randn(weight_shape, np.float32) * self.initW
        self.weight.set_weight(to_gpu(host_w))
    if self.bias.wt is None:
        host_b = np.ones(bias_shape, dtype=np.float32) * self.initB
        self.bias.set_weight(to_gpu(host_b))
def fprop(self, input, output, train=TRAIN):
    """Forward pass: output = W @ input + b, with (non-inverted) dropout.

    At train time a fresh Bernoulli mask (kept in ``self.dropMask`` for the
    backward pass) zeroes activations; at test time activations are instead
    scaled by ``1 - dropRate`` to match the training-time expectation.
    ``output`` is written in place.
    """
    # Affine part: output <- W @ input, then broadcast-add the bias row-wise.
    gpu_copy_to(dot(self.weight.wt, input), output)
    add_vec_to_rows(output, self.bias.wt)

    # Dropout only applies when a positive rate is configured; with
    # dropRate <= 0 both the TEST and train paths are no-ops.
    if self.dropRate > 0.0:
        if train == TEST:
            output *= (1.0 - self.dropRate)
        else:
            host_mask = np.random.uniform(0, 1, output.size).astype(np.float32).reshape(output.shape)
            self.dropMask = to_gpu(host_mask)
            # Binarize in place: keep units whose draw exceeds dropRate.
            bigger_than_scaler(self.dropMask, self.dropRate)
            gpu_copy_to(output * self.dropMask, output)

    if PFout:
        print_matrix(output, self.name)
def __init__(self, name, type, epsW, epsB, initW, initB, momW, momB, wc,
             weight, bias, weightIncr, biasIncr, disable_bprop=False):
    """Build a weighted layer: parameter holders plus optional restored state.

    ``epsW``/``epsB`` are learning rates, ``momW``/``momB`` momenta, ``wc``
    the weight-decay coefficient (applied to weights only).  ``weight``,
    ``bias`` and their increments, when not None, restore previous values.
    """
    Layer.__init__(self, name, type, disable_bprop)
    self.initW = initW
    self.initB = initB

    # Weight-side state: holder, then any restored value/increment.
    self.weight = WEIGHTS.empty('weight.' + self.name, epsW, momW, wc)
    if weight is not None:
        self.weight.set_weight(weight)
    if weightIncr is not None:
        self.weight.set_incr(weightIncr)

    # Bias-side state: no weight decay on biases (wc = 0.0).
    self.bias = WEIGHTS.empty('bias.' + self.name, epsB, momB, 0.0)
    if bias is not None:
        self.bias.set_weight(bias)
    if biasIncr is not None:
        # NOTE(review): biasIncr is pushed through to_gpu() while weightIncr
        # is passed raw — looks asymmetric; confirm set_incr's expected type.
        self.bias.set_incr(to_gpu(biasIncr))