def setup(self, bottom, top):
    """Initialize convolution parameters (weights and optional bias).

    :param bottom: list of input blobs; ``bottom[0].shape`` must be a
        3-tuple ``(channels, height, width)`` -- the unpacking below
        enforces the rank.  TODO(review): confirm layout with callers.
    :param top: output blobs (unused during setup).
    :raises AssertionError: if channels/outputs are not multiples of
        ``self.group``.
    :raises Exception: if an unsupported filler type is configured.
    """
    conv_param = self.layer_param.convolution_param
    # height and width are currently unused; the unpack doubles as a
    # rank-3 shape check on the input blob.
    channels, height, width = bottom[0].shape
    num_output = conv_param.num_output
    assert channels % self.group == 0, \
        "Number of channels should be a multiple of group."
    assert num_output % self.group == 0, \
        "Number of outputs should be a multiple of group."
    self.bias_term = conv_param.bias_term
    if self.weights is not None:
        logging.debug("Skipping parameter initialization")
    else:
        # Grouped convolution: each filter sees channels // group inputs.
        weights_shape = (num_output, channels // self.group,
                         self.kernel_h, self.kernel_w)
        weight_filler = conv_param.weight_filler
        if weight_filler.type == 'gaussian':
            # Draw from N(mean, std), cast to float32.
            self.weights = weight_filler.mean + weight_filler.std * \
                Array.standard_normal(weights_shape).astype(np.float32)
            self.weight_diff = Array.empty_like(self.weights)
        else:
            # BUG FIX: the original used a backslash line continuation
            # *inside* the string literal, which embedded a long run of
            # indentation whitespace in the error message.
            raise Exception("Filler not implemented for weight filler "
                            "type {}".format(weight_filler.type))
    if self.bias_term:
        self.bias = Array((num_output, ), np.float32)
        self.bias_diff = Array.empty_like(self.bias)
        filler = conv_param.bias_filler
        if filler.type == 'constant':
            self.bias.fill(filler.value)
        else:
            # Same whitespace-in-message fix as the weight filler above.
            raise Exception("Filler not implemented for bias filler "
                            "type {}".format(filler.type))
def setup(self, bottom, top):
    """Initialize convolution parameters (weights and optional bias).

    :param bottom: list of input blobs; ``bottom[0].shape`` must be a
        3-tuple ``(channels, height, width)`` -- the unpacking below
        enforces the rank.  TODO(review): confirm layout with callers.
    :param top: output blobs (unused during setup).
    :raises AssertionError: if channels/outputs are not multiples of
        ``self.group``.
    :raises Exception: if an unsupported filler type is configured.
    """
    conv_param = self.layer_param.convolution_param
    # height and width are currently unused; the unpack doubles as a
    # rank-3 shape check on the input blob.
    channels, height, width = bottom[0].shape
    num_output = conv_param.num_output
    assert channels % self.group == 0, \
        "Number of channels should be a multiple of group."
    assert num_output % self.group == 0, \
        "Number of outputs should be a multiple of group."
    self.bias_term = conv_param.bias_term
    if self.weights is not None:
        logging.debug("Skipping parameter initialization")
    else:
        # Grouped convolution: each filter sees channels // group inputs.
        weights_shape = (num_output, channels // self.group,
                         self.kernel_h, self.kernel_w)
        weight_filler = conv_param.weight_filler
        if weight_filler.type == 'gaussian':
            # Draw from N(mean, std), cast to float32.
            self.weights = weight_filler.mean + weight_filler.std * \
                Array.standard_normal(weights_shape).astype(np.float32)
            self.weight_diff = Array.empty_like(self.weights)
        else:
            # BUG FIX: the original used a backslash line continuation
            # *inside* the string literal, which embedded a long run of
            # indentation whitespace in the error message.
            raise Exception("Filler not implemented for weight filler "
                            "type {}".format(weight_filler.type))
    if self.bias_term:
        self.bias = Array((num_output, ), np.float32)
        self.bias_diff = Array.empty_like(self.bias)
        filler = conv_param.bias_filler
        if filler.type == 'constant':
            self.bias.fill(filler.value)
        else:
            # Same whitespace-in-message fix as the weight filler above.
            raise Exception("Filler not implemented for bias filler "
                            "type {}".format(filler.type))
def setup(self, bottom, top):
    """Initialize inner-product (fully connected) layer weights.

    :param bottom: input blob; ``bottom.shape[0]`` is taken as the input
        dimension.  TODO(review): confirm against callers -- the conv
        variant indexes ``bottom[0].shape`` instead of ``bottom.shape``.
    :param top: output blobs (unused during setup).
    :raises Exception: if an unsupported weight filler is configured.
    """
    weights_shape = (self.num_output, bottom.shape[0])
    weight_filler = self.layer_param.inner_product_param.weight_filler
    if weight_filler.type == 'gaussian':
        # Draw from N(mean, std), cast to float32.
        # NOTE(review): unlike the convolution setup, no weight_diff
        # buffer is allocated here -- verify whether that is intentional.
        self.weights = weight_filler.mean + weight_filler.std * \
            Array.standard_normal(weights_shape).astype(np.float32)
    else:
        # BUG FIX: the original implicit string concatenation omitted the
        # space between "filler" and "type", yielding e.g.
        # "...weight fillertype xavier".
        raise Exception("Filler not implemented for weight filler "
                        "type {}".format(weight_filler.type))