def initialize(self, kwargs):
    # weight_init and lrule_init must be supplied explicitly in this
    # version; accumulate and batch_norm default to False.
    super(WeightLayer, self).initialize(kwargs)
    req_param(self, ['weight_init', 'lrule_init', 'nin', 'nout'])
    opt_param(self, ['accumulate'], False)
    opt_param(self, ['batch_norm'], False)

    self.weight_init.initialize(self.backend)
    self.params = []
    self.updates = []

    if self.batch_norm:
        self.bn = BatchNorm()
        kwargs['layer'] = self
        self.bn.initialize(kwargs)
def initialize(self, kwargs):
    # Variant: weight_init and lrule_init become optional and fall back
    # to library defaults when the config does not provide them.
    super(WeightLayer, self).initialize(kwargs)
    req_param(self, ['nin', 'nout'])
    opt_param(self, ['weight_init'], default_weight_init())
    opt_param(self, ['lrule_init'], default_lrule_init())
    opt_param(self, ['accumulate'], False)
    opt_param(self, ['batch_norm'], False)
    opt_param(self, ['mempool'])  # used for parallel mode

    self.weight_init.initialize(self.backend)
    self.params = []
    self.updates = []

    if self.batch_norm:
        self.bn = BatchNorm()
        kwargs['layer'] = self
        self.bn.initialize(kwargs)
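
# Both versions above rely on req_param/opt_param to validate and default
# layer attributes. As a rough sketch (an assumption about their behavior,
# not the library's actual implementation), helpers matching the call
# signatures used above could look like this:

def req_param(obj, paramlist):
    # Every required parameter must already exist as an attribute on the
    # layer (typically populated from the experiment config).
    for param in paramlist:
        if not hasattr(obj, param):
            raise ValueError("required parameter %s missing for %s" %
                             (param, obj.__class__.__name__))


def opt_param(obj, paramlist, default_value=None):
    # Optional parameters are filled in with a default when absent.
    for param in paramlist:
        if not hasattr(obj, param):
            setattr(obj, param, default_value)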