def __init__(self, n_features, eps=None):
    """Batch-normalization module state.

    Creates the learnable scale/shift parameters, their pre-folded
    inference-time copies, and buffers for accumulating minibatch
    statistics over `n_features` features.

    Args:
        n_features: number of features (channels) being normalized.
        eps: small constant added to the variance for numerical
            stability. Defaults to 1e-5 when None.
    """
    df.Module.__init__(self)

    # Learnable affine parameters (gamma/beta) and their gradient buffers.
    self.weight, self.grad_weight = create_param_and_grad(
        n_features, df.init.const(1), name='W_BN_{}'.format(n_features))
    self.bias, self.grad_bias = create_param_and_grad(
        n_features, df.init.const(0), name='b_BN_{}'.format(n_features))

    # Pre-computed parameters used at inference time.
    self.inference_weight = create_param(
        n_features, df.init.const(1), name='W_BN_{}_inf'.format(n_features))
    self.inference_bias = create_param(
        n_features, df.init.const(0), name='b_BN_{}_inf'.format(n_features))

    # These are buffers for collecting the minibatch statistics.
    self.buffer_variance = create_param(
        n_features, df.init.const(1), name='BN_var_{}'.format(n_features))
    self.buffer_mean = create_param(
        n_features, df.init.const(0), name='BN_mean_{}'.format(n_features))
    self.buffer_counts = df.th.shared(
        _np.asarray(0, dtype=df.floatX),
        name='BN_count_{}'.format(n_features))

    # BUGFIX: `eps or 1e-5` silently replaced an explicit eps of 0/0.0
    # with the default; test against None so only a missing value falls
    # back to 1e-5.
    self.eps = 1e-5 if eps is None else eps

    self.batch_mean = None
    self.batch_var = None
def __init__(self, n_input_plane, n_output_plane, k_w, k_h, d_w=1, d_h=1, pad_w=0, pad_h=0, mode='cross', with_bias=True, initW=df.init.xavier(), initB=df.init.const(0), border=None):
    """2D convolution layer.

    `mode='cross'` (cross-correlation) is the default in Lasagne[1],
    Torch[2], matConvNet[3] and Caffe[4]:

      1: https://github.com/Lasagne/Lasagne/blob/63d44a0d/lasagne/layers/dnn.py#L299
      2: https://github.com/soumith/cudnn.torch/blob/840f0228/SpatialConvolution.lua#L83
      3: https://github.com/vlfeat/matconvnet/blob/b7dd9c96/matlab/src/bits/impl/nnconv_cudnn.cu#L133
      4: https://github.com/BVLC/caffe/blob/50ab52cb/include/caffe/util/cudnn.hpp#L104

    `border` is an alternative way to specify `pad_w` and `pad_h` so that
    Theano strings can be used. Better documentation to follow soon.
    """
    df.Module.__init__(self)

    self.n_input_plane = n_input_plane
    self.n_output_plane = n_output_plane
    self.k_w, self.k_h = k_w, k_h
    self.d_w, self.d_h = d_w, d_h
    self.mode = mode
    self.with_bias = with_bias

    # 'same' is a (common) shortcut for "zero-padding so that outshape == inshape".
    self.border = border or (pad_h, pad_w)
    if self.border == 'same':
        assert self.k_w % 2 == 1 and self.k_h % 2 == 1, "'same' convolution only supports odd filter sizes."
        self.border = ((self.k_h-1)//2, (self.k_w-1)//2)

    # Filter tensor is laid out (out, in, height, width); fan counts are
    # passed along so the initializer can scale itself.
    filt_shape = (n_output_plane, n_input_plane, k_h, k_w)
    fans = (n_input_plane*k_w*k_h, n_output_plane*k_w*k_h)
    self.weight, self.grad_weight = create_param_and_grad(
        filt_shape, initW, fan=fans,
        name='Wconv_{},{}@{}x{}'.format(n_input_plane, n_output_plane, k_w, k_h))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            n_output_plane, initB, name='bconv_{}'.format(n_output_plane))
def __init__(self, n_input_vol, n_output_vol, k_w, k_h, k_d, with_bias=True, initW=df.init.xavier(), initB=df.init.const(0), border_mode='valid', volshape=None):
    """Volumetric (3D) convolution layer.

    Stores the layer configuration and allocates the filter weights
    (and optionally the bias) with their gradient buffers.
    """
    df.Module.__init__(self)

    # Remember the full configuration for use by the forward pass.
    self.n_input_vol = n_input_vol
    self.n_output_vol = n_output_vol
    self.k_w, self.k_h, self.k_d = k_w, k_h, k_d
    self.with_bias = with_bias
    self.border_mode = border_mode
    self.volshape = volshape

    # Filter tensor layout: (out, depth, in, height, width).
    self.w_shape = (n_output_vol, k_d, n_input_vol, k_h, k_w)
    fans = (n_input_vol * k_w * k_h * k_d, n_output_vol * k_w * k_h * k_d)

    self.weight, self.grad_weight = create_param_and_grad(
        self.w_shape, initW, fan=fans,
        name='Wconv_{},{},{},{},{}'.format(n_output_vol, k_d, n_input_vol, k_h, k_w))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            (n_output_vol, ), initB, name='bconv_{}'.format(n_output_vol))
def __init__(self, n_input_vol, n_output_vol, k_w, k_h, k_d, with_bias=True, initW=df.init.xavier(), initB=df.init.const(0), border_mode="valid", volshape=None):
    """3D convolution layer: keeps the configuration and creates the
    filter weights (plus optional bias) together with gradient storage."""
    df.Module.__init__(self)

    self.n_input_vol = n_input_vol
    self.n_output_vol = n_output_vol
    self.k_w = k_w
    self.k_h = k_h
    self.k_d = k_d
    self.with_bias = with_bias
    self.border_mode = border_mode
    self.volshape = volshape

    # Weight tensor is (out, depth, in, height, width); the fan pair lets
    # the initializer pick its scale.
    self.w_shape = (n_output_vol, k_d, n_input_vol, k_h, k_w)
    receptive = k_w * k_h * k_d
    w_fan = (n_input_vol * receptive, n_output_vol * receptive)

    self.weight, self.grad_weight = create_param_and_grad(
        self.w_shape,
        initW,
        fan=w_fan,
        name="Wconv_{},{},{},{},{}".format(n_output_vol, k_d, n_input_vol, k_h, k_w),
    )

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            (n_output_vol,),
            initB,
            name="bconv_{}".format(n_output_vol),
        )
def __init__(self, n_input_plane, n_output_plane, k_w, k_h, d_w=1, d_h=1, pad_w=0, pad_h=0, with_bias=True, initW=xavier(), initB=const(0)):
    """Spatial (2D) convolution layer with explicit stride and padding.

    Records the configuration and allocates the filter weights and the
    optional bias, each paired with a gradient buffer.
    """
    Module.__init__(self)

    self.n_input_plane = n_input_plane
    self.n_output_plane = n_output_plane
    self.k_w, self.k_h = k_w, k_h
    self.d_w, self.d_h = d_w, d_h
    self.pad_w, self.pad_h = pad_w, pad_h
    self.with_bias = with_bias

    # (out, in, height, width) filter tensor; fan pair informs the init.
    kernel_shape = (n_output_plane, n_input_plane, k_h, k_w)
    kernel_fan = (n_input_plane * k_w * k_h, n_output_plane * k_w * k_h)

    self.weight, self.grad_weight = create_param_and_grad(
        kernel_shape, initW, fan=kernel_fan,
        name='Wconv_{},{}@{}x{}'.format(n_input_plane, n_output_plane, k_w, k_h))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            n_output_plane, initB, name='bconv_{}'.format(n_output_plane))
def __init__(self, n_input_plane, n_output_plane, k_w, k_h, d_w=1, d_h=1, with_bias=True, initW=df.init.xavier(), initB=df.init.const(0), border_mode='valid', imshape=None):
    """2D convolution layer (Theano `border_mode` variant).

    Stores the configuration, optionally including a fixed image shape
    hint, and creates the filter weights and optional bias with their
    gradient buffers.
    """
    df.Module.__init__(self)

    # Layer configuration.
    self.n_input_plane = n_input_plane
    self.n_output_plane = n_output_plane
    self.k_w = k_w
    self.k_h = k_h
    self.d_w = d_w
    self.d_h = d_h
    self.with_bias = with_bias
    self.border_mode = border_mode
    self.imshape = imshape

    # Filter tensor layout: (out, in, height, width).
    self.w_shape = (n_output_plane, n_input_plane, k_h, k_w)
    fan_pair = (n_input_plane * k_w * k_h, n_output_plane * k_w * k_h)

    self.weight, self.grad_weight = create_param_and_grad(
        self.w_shape, initW, fan=fan_pair,
        name='Wconv_{},{}@{}x{}'.format(n_input_plane, n_output_plane, k_w, k_h))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            n_output_plane, initB, name='bconv_{}'.format(n_output_plane))
def __init__(self, nin, nout, with_bias=True, initW=df.init.xavier(), initB=df.init.const(0)):
    """Fully-connected (linear) layer mapping `nin` inputs to `nout` outputs.

    Allocates the (nin, nout) weight matrix and, when `with_bias` is
    true, a length-`nout` bias vector — each with a gradient buffer.
    """
    df.Module.__init__(self)

    self.nin = nin
    self.nout = nout
    self.with_bias = with_bias

    # Fan in/out are simply the matrix dimensions for a dense layer.
    self.weight, self.grad_weight = create_param_and_grad(
        (nin, nout), initW, fan=(nin, nout),
        name='Wlin_{}x{}'.format(nin, nout))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            nout, initB, name='blin_{}'.format(nout))
def __init__(self, n_input_plane, n_output_plane, k_w, k_h, d_w=1, d_h=1, pad_w=0, pad_h=0, mode='cross', with_bias=True, initW=df.init.xavier(), initB=df.init.const(0), border=None):
    """2D convolution layer.

    The default `mode='cross'` (cross-correlation) matches Lasagne[1],
    Torch[2], matConvNet[3] and Caffe[4]:

      1: https://github.com/Lasagne/Lasagne/blob/63d44a0d/lasagne/layers/dnn.py#L299
      2: https://github.com/soumith/cudnn.torch/blob/840f0228/SpatialConvolution.lua#L83
      3: https://github.com/vlfeat/matconvnet/blob/b7dd9c96/matlab/src/bits/impl/nnconv_cudnn.cu#L133
      4: https://github.com/BVLC/caffe/blob/50ab52cb/include/caffe/util/cudnn.hpp#L104

    `border` is an alternative way to specify `pad_w` and `pad_h` so that
    Theano strings can be used. Better documentation to follow soon.
    """
    df.Module.__init__(self)

    # Plain configuration attributes.
    self.n_input_plane = n_input_plane
    self.n_output_plane = n_output_plane
    self.k_w = k_w
    self.k_h = k_h
    self.d_w = d_w
    self.d_h = d_h
    self.mode = mode
    self.with_bias = with_bias

    # 'same' is a (common) shortcut for "zero-padding so that outshape == inshape".
    self.border = border or (pad_h, pad_w)
    if self.border == 'same':
        assert self.k_w % 2 == 1 and self.k_h % 2 == 1, "'same' convolution only supports odd filter sizes."
        half_h = (self.k_h - 1) // 2
        half_w = (self.k_w - 1) // 2
        self.border = (half_h, half_w)

    # Filter tensor layout (out, in, height, width); fan counts are
    # handed to the initializer for scaling.
    kernel_shape = (n_output_plane, n_input_plane, k_h, k_w)
    kernel_fan = (n_input_plane * k_w * k_h, n_output_plane * k_w * k_h)
    self.weight, self.grad_weight = create_param_and_grad(
        kernel_shape, initW, fan=kernel_fan,
        name='Wconv_{},{}@{}x{}'.format(n_input_plane, n_output_plane, k_w, k_h))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            n_output_plane, initB, name='bconv_{}'.format(n_output_plane))
def __init__(self, n_input_plane, n_output_plane, k_w, k_h, d_w=1, d_h=1, pad_w=0, pad_h=0, with_bias=True, initW=xavier(), initB=const(0)):
    """2D convolution layer with stride (`d_w`, `d_h`) and zero-padding
    (`pad_w`, `pad_h`); creates weights and optional bias with gradients."""
    Module.__init__(self)

    self.n_input_plane = n_input_plane
    self.n_output_plane = n_output_plane
    self.k_w = k_w
    self.k_h = k_h
    self.d_w = d_w
    self.d_h = d_h
    self.pad_w = pad_w
    self.pad_h = pad_h
    self.with_bias = with_bias

    # (out, in, height, width) filter tensor and its fan-in/fan-out.
    receptive = k_w * k_h
    filt_shape = (n_output_plane, n_input_plane, k_h, k_w)
    filt_fan = (n_input_plane * receptive, n_output_plane * receptive)

    self.weight, self.grad_weight = create_param_and_grad(
        filt_shape, initW, fan=filt_fan,
        name='Wconv_{},{}@{}x{}'.format(n_input_plane, n_output_plane, k_w, k_h))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            n_output_plane, initB, name='bconv_{}'.format(n_output_plane))
def __init__(self, n_features, eps=None):
    """Batch-normalization module state for `n_features` features.

    Args:
        n_features: number of features (channels) being normalized.
        eps: variance-stabilizing constant; 1e-5 when None.
    """
    df.Module.__init__(self)

    # Learnable scale (gamma) and shift (beta) with gradient buffers.
    self.weight, self.grad_weight = create_param_and_grad(n_features, df.init.const(1), name='W_BN_{}'.format(n_features))
    self.bias, self.grad_bias = create_param_and_grad(n_features, df.init.const(0), name='b_BN_{}'.format(n_features))

    # Pre-computed parameters applied at inference time.
    self.inference_weight = create_param(n_features, df.init.const(1), name='W_BN_{}_inf'.format(n_features))
    self.inference_bias = create_param(n_features, df.init.const(0), name='b_BN_{}_inf'.format(n_features))

    # These are buffers for collecting the minibatch statistics.
    self.buffer_variance = create_param(n_features, df.init.const(1), name='BN_var_{}'.format(n_features))
    self.buffer_mean = create_param(n_features, df.init.const(0), name='BN_mean_{}'.format(n_features))
    self.buffer_counts = df.th.shared(_np.asarray(0, dtype=df.floatX), name='BN_count_{}'.format(n_features))

    # BUGFIX: `eps or 1e-5` treated an explicit eps=0/0.0 as "missing";
    # only a None eps should fall back to the 1e-5 default.
    self.eps = 1e-5 if eps is None else eps

    self.batch_mean = None
    self.batch_var = None
def __init__(self, nin, nout, with_bias=True, initW=df.init.xavier(), initB=df.init.const(0)):
    """Dense layer: an (nin, nout) weight matrix plus optional bias,
    each allocated together with its gradient buffer."""
    df.Module.__init__(self)

    self.nin, self.nout = nin, nout
    self.with_bias = with_bias

    # For a dense layer the fan in/out equal the matrix dimensions.
    shape = (nin, nout)
    self.weight, self.grad_weight = create_param_and_grad(
        shape, initW, fan=shape, name='Wlin_{}x{}'.format(nin, nout))

    if self.with_bias:
        self.bias, self.grad_bias = create_param_and_grad(
            nout, initB, name='blin_{}'.format(nout))