def __init__(self, outsize, batch_norm=False, activation=-1, usebias=True, norm=False):
    """Fully-connected layer wrapper with optional batch norm and activation.

    :param outsize: Number of output units of the fc layer.
    :param batch_norm: Whether to append a batch-normalization sub-layer.
    :param activation: Activation selector; -1 (the library sentinel) means none.
    :param usebias: Whether the fc layer carries a bias term.
    :param norm: Forwarded to L.fcLayer as its `norm` flag.
    """
    super(Dense, self).__init__()
    # Record configuration flags so the forward pass can branch on them.
    self.batch_norm = batch_norm
    self.activation = activation
    self.fc = L.fcLayer(outsize, usebias, norm=norm)
    # Sub-layers are only instantiated when requested.
    if batch_norm:
        self.bn = L.batch_norm()
    if activation != -1:
        self.act = L.activation(activation)
def initialize(self, size, chn, ratio, input_ratio=None, stride=1, pad='SAME', activation=-1, batch_norm=False, usebias=True):
    """Build the four cross-frequency conv paths plus optional bn/activation.

    NOTE(review): the hh/hl/lh/ll naming and the 2x2 average pool suggest an
    octave-convolution-style layer — confirm against the forward pass.

    :param size: Convolution kernel size, forwarded to ConvLayer.
    :param chn: Total number of output channels, split by `ratio`.
    :param ratio: Fraction of `chn` routed to the "big" (h*) outputs.
    :param input_ratio: Split ratio of the incoming tensor; defaults to `ratio`.
    :param stride: Stride forwarded to every ConvLayer.
    :param pad: Padding mode forwarded to every ConvLayer.
    :param activation: Activation selector; -1 means none.
    :param batch_norm: Whether to append a batch-normalization sub-layer.
    :param usebias: Whether each ConvLayer carries a bias term.
    """
    # Channel budget: `big` goes to the h* convs, the remainder to the l* convs.
    big = int(chn * ratio)
    small = chn - big
    self.avgpool = AvgPool(2, 2)
    self.Convhh = ConvLayer(size, big, stride=stride, pad=pad, usebias=usebias)
    self.Convhl = ConvLayer(size, small, stride=stride, pad=pad, usebias=usebias)
    self.Convlh = ConvLayer(size, big, stride=stride, pad=pad, usebias=usebias)
    self.Convll = ConvLayer(size, small, stride=stride, pad=pad, usebias=usebias)
    # Optional bn / activation sub-layers.
    if batch_norm:
        self.bn = L.batch_norm()
    if activation != -1:
        self.act = L.activation(activation)
    # Bookkeeping used by the forward pass.
    self.batch_norm = batch_norm
    self.activation = activation
    self.ratio = ratio
    self.input_ratio = ratio if input_ratio is None else input_ratio
    self.stride = stride
    self.chn_big = big
def __init__(self, outsize, adj_mtx=None, adj_fn=None, usebias=True, activation=-1, batch_norm=False):
    """Graph-convolution layer wrapper with optional batch norm and activation.

    :param outsize: Number of output features of the graph conv.
    :param adj_mtx: Fixed adjacency matrix forwarded to L.graphConvLayer.
    :param adj_fn: Callable producing an adjacency, forwarded to L.graphConvLayer.
    :param usebias: Whether the graph conv carries a bias term.
    :param activation: Activation selector; -1 (the library sentinel) means none.
    :param batch_norm: Whether to append a batch-normalization sub-layer.
    """
    super(GraphConvLayer, self).__init__()
    # Flags consulted by the forward pass.
    self.batch_norm = batch_norm
    self.activation = activation
    self.GCL = L.graphConvLayer(outsize, adj_mtx=adj_mtx, adj_fn=adj_fn, usebias=usebias)
    # Optional sub-layers, created only on demand.
    if batch_norm:
        self.bn = L.batch_norm()
    if activation != -1:
        self.act = L.activation(activation)
def __init__(self, size, outchn, dilation_rate=1, stride=1, pad='SAME', activation=-1, batch_norm=False, usebias=True):
    """3-D transposed-convolution wrapper with optional batch norm and activation.

    :param size: Deconvolution kernel size.
    :param outchn: Number of output channels.
    :param dilation_rate: Dilation rate forwarded to L.deconv3D.
    :param stride: Stride forwarded to L.deconv3D.
    :param pad: Padding mode forwarded to L.deconv3D.
    :param activation: Activation selector; -1 (the library sentinel) means none.
    :param batch_norm: Whether to append a batch-normalization sub-layer.
    :param usebias: Whether the deconv carries a bias term.
    """
    super(DeconvLayer3D, self).__init__()
    # Flags consulted by the forward pass.
    self.batch_norm = batch_norm
    self.activation = activation
    self.conv = L.deconv3D(size, outchn, stride, pad, dilation_rate, usebias)
    # Optional sub-layers, created only on demand.
    if batch_norm:
        self.bn = L.batch_norm()
    if activation != -1:
        self.act = L.activation(activation)
def __init__(self, size, outchn, dilation_rate=1, stride=1, pad='SAME', activation=-1, batch_norm=False, usebias=True):
    """1-D convolution wrapper with optional batch norm and activation.

    :type size: int or list[int]
    :param size: Convolution kernel size.
    :type outchn: int
    :param outchn: Number of output channels.
    :type dilation_rate: int or list[int]
    :param dilation_rate: Dilation rate; when larger than 1, stride should be 1.
    :type stride: int or list[int]
    :param stride: Stride; integer or list of integers.
    :type pad: String
    :param pad: One of 'SAME', 'VALID', 'SAME_LEFT'. 'VALID' disables
        auto-padding; 'SAME' is tensorflow-style and 'SAME_LEFT' is
        pytorch-style auto-padding.
    :param activation: Same candidates as layers3.activate; -1 means none.
    :type batch_norm: bool
    :param batch_norm: Whether to append a batch-normalization sub-layer.
    :type usebias: bool
    :param usebias: Whether the conv carries a bias term.
    """
    super(ConvLayer1D, self).__init__()
    # Flags consulted by the forward pass.
    self.batch_norm = batch_norm
    self.activation = activation
    self.conv = L.conv1D(size, outchn, stride, pad, dilation_rate, usebias)
    # Optional sub-layers, created only on demand.
    if batch_norm:
        self.bn = L.batch_norm()
    if activation != -1:
        self.act = L.activation(activation)
def __init__(self, size, multiplier, dilation_rate=1, stride=1, pad='SAME', activation=-1, batch_norm=False, usebias=True, values=None):
    """Depthwise 2-D convolution wrapper with optional batch norm and activation.

    :type size: int or list[int]
    :param size: Convolution kernel size.
    :type multiplier: int
    :param multiplier: Output-channel multiplier (outchannel = multiplier * inchannel).
    :type dilation_rate: int or list[int]
    :param dilation_rate: Dilation rate; when larger than 1, stride should be 1.
    :type stride: int or list[int]
    :param stride: Stride; integer or list of integers.
    :type pad: String
    :param pad: One of 'SAME', 'VALID', 'SAME_LEFT'. 'VALID' disables
        auto-padding; 'SAME' is tensorflow-style and 'SAME_LEFT' is
        pytorch-style auto-padding.
    :param activation: Same candidates as layers3.activate; -1 means none.
    :type batch_norm: bool
    :param batch_norm: Whether to append a batch-normalization sub-layer.
    :type usebias: bool
    :param usebias: Whether the depthwise conv carries a bias term.
    :type values: list[np.array]
    :param values: Optional pretrained parameters: the conv weight (plus bias
        when `usebias`) first, followed by the batch-norm parameters.
    """
    super(DWConvLayer, self).__init__()
    self.batch_norm = batch_norm
    self.activation = activation
    self.values = values
    if values is None:
        # Fresh initialization path.
        self.dwconv = L.dwconv2D(size, multiplier, stride, pad, dilation_rate, usebias)
        if batch_norm:
            self.bn = L.batch_norm()
    else:
        # Pretrained path: the conv consumes weight (+bias when present),
        # the remainder of `values` belongs to batch norm.
        split = 2 if usebias else 1
        self.dwconv = L.dwconv2D(size, multiplier, stride, pad, dilation_rate, usebias, values=values[:split])
        if batch_norm:
            self.bn = L.batch_norm(values=values[split:])
    if activation != -1:
        self.act = L.activation(activation)