def __init__(self, initializer=None, shape=None, name=None):
    if initializer is None:
        initializer = constant.NaN()
    elif numpy.isscalar(initializer):
        initializer = constant.Constant(initializer)
    if shape is None:
        if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
            # parameter initialized by the initial array
            super(Parameter, self).__init__(initializer, name=name)
        else:
            # uninitialized parameter
            super(Parameter, self).__init__(name=name)
            self.initializer = initializer
            dtype = getattr(initializer, 'dtype', numpy.float32)
            self._grad_initializer = constant.NaN(dtype)
    else:
        # parameter initialized with a given shape
        if isinstance(initializer, (numpy.ndarray, cuda.ndarray)):
            xp = cuda.get_array_module(initializer)
            initializer = constant.Constant(initializer)
        else:
            xp = numpy
        data = initializers.generate_array(initializer, shape, xp)
        grad = xp.full_like(data, numpy.nan)
        super(Parameter, self).__init__(data, name=name, grad=grad)

    self.update_rule = None
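
# Minimal usage sketch (not part of the original source) of the three
# construction paths handled above, assuming a standard Chainer install
# where chainer.Parameter wraps this __init__:
#
#     import numpy as np
#     import chainer
#
#     # shape given: the data array is materialized immediately.
#     p1 = chainer.Parameter(chainer.initializers.Normal(0.05), shape=(3, 2))
#     assert p1.array.shape == (3, 2)
#
#     # no shape: initialization is deferred until initialize() is called,
#     # typically by the owning Link on the first forward pass.
#     p2 = chainer.Parameter(chainer.initializers.Normal(0.05))
#     assert p2.array is None
#     p2.initialize((3, 2))
#     assert p2.array.shape == (3, 2)
#
#     # an ndarray is used directly as the initial value.
#     p3 = chainer.Parameter(np.zeros((2, 2), dtype=np.float32))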
def setUp(self):
    self.in_size = 2
    self.out_size = 3
    self.pool_size = 4

    if self.initializer == 'Initializer':
        self.initialW = constant.Constant(1.0)
        self.initial_bias = constant.Constant(2.0)
    elif self.initializer == 'scalar':
        self.initialW = 1.0
        self.initial_bias = 2.0
    elif self.initializer == 'ndarray':
        self.initialW = numpy.random.uniform(
            -1, 1, (self.out_size, self.pool_size, self.in_size)).astype(
                self.dtype)
        self.initial_bias = numpy.random.uniform(
            -1, 1, (self.out_size, self.pool_size)).astype(self.dtype)
    elif self.initializer == 'callable':
        def callable_initialW(array):
            assert array.dtype == self.dtype
            assert array.shape == (
                self.out_size, self.pool_size, self.in_size)
            array.fill(1.0)
        self.initialW = callable_initialW

        def callable_initial_bias(array):
            assert array.dtype == self.dtype
            assert array.shape == (self.out_size, self.pool_size)
            array.fill(2.0)
        self.initial_bias = callable_initial_bias
    else:
        raise ValueError('invalid parameter')

    with chainer.using_config('dtype', self.dtype):
        self.link = links.Maxout(
            self.in_size, self.out_size, self.pool_size,
            initialW=self.initialW, initial_bias=self.initial_bias)
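
# Minimal sketch (an assumption, not taken from the test above) of what the
# constructed link computes: Maxout applies a Linear layer with
# out_size * pool_size units and takes the maximum over each pool.
#
#     import numpy as np
#     from chainer import links
#
#     link = links.Maxout(in_size=2, out_size=3, pool_size=4,
#                         initialW=1.0, initial_bias=2.0)
#     x = np.ones((5, 2), dtype=np.float32)   # batch of 5 inputs
#     y = link(x)
#     assert y.shape == (5, 3)                # pool dimension reduced by max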