def initialize(self):
    """Build a detection network: ResNet-style backbone, top-down feature
    aggregation (lateral 1x1 convs + 2x upsampling + 3x3 merge convs), and a
    DETHead/RegressHead pair at each of the three output scales (8/16/32).
    """
    # --- Backbone stem: BN -> 7x7/s2 conv -> 3x3/s2 max-pool ---
    self.bn0 = M.BatchNorm()
    self.c1 = M.ConvLayer(7, 64, stride=2, activation=M.PARAM_RELU,
                          batch_norm=True, usebias=False)
    self.pool = M.MaxPool2D(3, 2)

    # --- Backbone stages (channels x units, stride-2 downsampling after stage1) ---
    self.stage1 = Stage(64, num_units=3, stride=1)
    self.stage2 = Stage(128, num_units=4, stride=2)
    self.stage3 = Stage(256, num_units=6, stride=2)
    self.stage4 = Stage(512, num_units=3, stride=2)
    self.bn1 = M.BatchNorm()
    self.act = M.Activation(M.PARAM_RELU)

    # --- Coarsest scale (stride 32): lateral conv -> detection heads ---
    self.ssh_c3_lateral = M.ConvLayer(1, 256, batch_norm=True,
                                      activation=M.PARAM_RELU)
    self.det3 = DETHead()
    self.head32 = RegressHead()

    # --- Middle scale (stride 16): lateral + upsampled c3, merged by 3x3 conv ---
    self.ssh_c2_lateral = M.ConvLayer(1, 256, batch_norm=True,
                                      activation=M.PARAM_RELU)
    self.ssh_c3_upsampling = M.NNUpSample(2)
    self.ssh_c2_aggr = M.ConvLayer(3, 256, batch_norm=True,
                                   activation=M.PARAM_RELU)
    self.det2 = DETHead()
    self.head16 = RegressHead()

    # --- Finest scale (stride 8): reduce conv + upsampled c2, merged by 3x3 conv ---
    self.ssh_m1_red_conv = M.ConvLayer(1, 256, batch_norm=True,
                                       activation=M.PARAM_RELU)
    self.ssh_c2_upsampling = M.NNUpSample(2)
    self.ssh_c1_aggr = M.ConvLayer(3, 256, batch_norm=True,
                                   activation=M.PARAM_RELU)
    self.det1 = DETHead()
    self.head8 = RegressHead()
def initialize(self, outchn, stride):
    """Residual unit: pre-BN, two 3x3 convs (second carries the stride),
    and a 1x1 projection shortcut matching channels and stride.
    """
    self.stride = stride
    self.outchn = outchn
    self.bn0 = M.BatchNorm()
    # Main path: 3x3 (PReLU) followed by a strided 3x3.
    self.c1 = M.ConvLayer(3, outchn, activation=M.PARAM_PRELU,
                          usebias=False, batch_norm=True)
    self.c2 = M.ConvLayer(3, outchn, stride=stride,
                          usebias=False, batch_norm=True)
    # SE module — intentionally disabled.
    # self.c3 = M.ConvLayer(1, outchn//16, activation=M.PARAM_PRELU)
    # self.c4 = M.ConvLayer(1, outchn, activation=M.PARAM_SIGMOID)
    # Projection shortcut (1x1, strided) so the skip branch matches the main path.
    self.sc = M.ConvLayer(1, outchn, stride=stride,
                          usebias=False, batch_norm=True)
def initialize(self, ksize, filters, stride, expand):
    """Inverted-bottleneck unit: 1x1 expand -> depthwise conv -> SE gate ->
    1x1 project, with a 1x1 strided projection shortcut.
    """
    self.outchn = filters
    self.expand = expand
    self.stride = stride
    expanded = filters * expand  # width of the expanded inner representation
    self.bn0 = M.BatchNorm()
    # Expansion and depthwise stages.
    self.c0 = M.ConvLayer(1, expanded, usebias=False, batch_norm=True,
                          activation=M.PARAM_PRELU)
    self.c1 = M.DWConvLayer(ksize, 1, stride=stride, usebias=False,
                            batch_norm=True, activation=M.PARAM_PRELU)
    # Squeeze-and-excitation: bottleneck to expanded//8, sigmoid gate back.
    self.se1 = M.ConvLayer(1, expanded // 8, activation=M.PARAM_PRELU)
    self.se2 = M.ConvLayer(1, expanded, activation=M.PARAM_SIGMOID)
    # Linear projection back to `filters` channels.
    self.c2 = M.ConvLayer(1, filters, batch_norm=True, usebias=False)
    # Shortcut projection matching output channels/stride.
    self.sc = M.ConvLayer(1, filters, stride=stride, batch_norm=True,
                          usebias=False)
def initialize(self, channel_list, blocknum_list, embedding_size, embedding_bn=True):
    """Embedding network: stem conv, four stages, BN, and a dense embedding head.

    Args:
        channel_list: per-stage channel widths; index 0 is the stem width.
        blocknum_list: number of units in each of the four stages.
        embedding_size: width of the final dense embedding.
        embedding_bn: accepted for interface parity with sibling networks.
    """
    self.c1 = M.ConvLayer(3, channel_list[0], activation=M.PARAM_PRELU)
    # self.u1 = ResBlock_v1(channel_list[1], stride=2)
    self.stage1 = Stage(channel_list[1], blocknum_list[0])
    self.stage2 = Stage(channel_list[2], blocknum_list[1])
    self.stage3 = Stage(channel_list[3], blocknum_list[2])
    self.stage4 = Stage(channel_list[4], blocknum_list[3])
    self.bn1 = M.BatchNorm()
    # Fix: the embedding width was hard-coded to 512, silently ignoring the
    # `embedding_size` argument (sibling networks in this file pass it through).
    # Callers using the previous de-facto width pass 512 and are unaffected.
    self.fc1 = M.Dense(embedding_size)
    # NOTE(review): `embedding_bn` is still unused here; other networks attach
    # batch_norm to the dense head — confirm intent before wiring it up.
def initialize(self, chn, stride=1, shortcut=False):
    """Bottleneck residual unit (1x1 reduce -> 3x3 -> 1x1 expand to chn*4),
    with an optional 1x1 projection shortcut when `shortcut` is True.
    """
    self.bn0 = M.BatchNorm()
    self.act = M.Activation(M.PARAM_RELU)
    # Bottleneck path.
    self.c1 = M.ConvLayer(1, chn, activation=M.PARAM_RELU,
                          batch_norm=True, usebias=False)
    self.c2 = M.ConvLayer(3, chn, stride=stride, activation=M.PARAM_RELU,
                          batch_norm=True, usebias=False)
    self.c3 = M.ConvLayer(1, chn * 4, usebias=False)
    # Projection shortcut is built only on demand (first unit of a stage).
    self.shortcut = shortcut
    if shortcut:
        self.sc = M.ConvLayer(1, chn * 4, stride=stride, usebias=False)
def initialize(self, channel_list, blocknum_list, embedding_size, embedding_bn=True):
    """Embedding network: stem conv, four stages, BN, and a bias-free dense
    embedding head of width `embedding_size`.
    """
    # Stem: 3x3 conv, stride 1.
    self.c1 = M.ConvLayer(3, channel_list[0], 1, usebias=False,
                          activation=M.PARAM_PRELU, batch_norm=True)
    # self.u1 = ResBlock_v1(channel_list[1], stride=2)
    self.stage1 = Stage(channel_list[1], blocknum_list[0])
    self.stage2 = Stage(channel_list[2], blocknum_list[1])
    self.stage3 = Stage(channel_list[3], blocknum_list[2])
    self.stage4 = Stage(channel_list[4], blocknum_list[3])
    self.bn1 = M.BatchNorm()
    print('Embedding_size:', embedding_size)
    # NOTE(review): `embedding_bn` is accepted but never used — confirm whether
    # the dense head should take batch_norm=embedding_bn before changing it.
    self.fc1 = M.Dense(embedding_size, usebias=False)
def initialize(self, outchn=512, dilation=1, k=3):
    """Two-layer 1D conv block with VALID padding; the first conv is dilated."""
    self.bn = M.BatchNorm()
    # Dilated conv (k wide) with PReLU + BN, then a plain 3-wide conv.
    self.c1 = M.ConvLayer1D(k, outchn,
                            dilation_rate=dilation,
                            activation=M.PARAM_PRELU,
                            batch_norm=True,
                            usebias=False,
                            pad='VALID')
    self.c2 = M.ConvLayer1D(3, outchn, pad='VALID')
def initialize(self, channel_list, blocknum_list, drop_prob):
    """Backbone with stochastic-depth stages and a fixed 512-wide BN'd
    dense embedding head.
    """
    self.c1 = M.ConvLayer(3, channel_list[0], usebias=False,
                          batch_norm=True, activation=M.PARAM_PRELU)
    # Each stage receives the same drop probability for its units.
    self.stage1 = Stage(channel_list[1], blocknum_list[0], drop_prob)
    self.stage2 = Stage(channel_list[2], blocknum_list[1], drop_prob)
    self.stage3 = Stage(channel_list[3], blocknum_list[2], drop_prob)
    self.stage4 = Stage(channel_list[4], blocknum_list[3], drop_prob)
    self.bn1 = M.BatchNorm()
    self.fc1 = M.Dense(512, usebias=False, batch_norm=True)
def initialize(self, channel_list, blocknum_list, embedding_size):
    """Backbone with one or more parallel dense embedding heads.

    `embedding_size` may be a single int or a list of ints; one bias-free
    Dense head is created per requested size, in order.
    """
    self.c1 = M.ConvLayer(3, channel_list[0], 1, usebias=False,
                          activation=M.PARAM_PRELU, batch_norm=True)
    # self.u1 = ResBlock_v1(channel_list[1], stride=2)
    self.stage1 = Stage(channel_list[1], blocknum_list[0])
    self.stage2 = Stage(channel_list[2], blocknum_list[1])
    self.stage3 = Stage(channel_list[3], blocknum_list[2])
    self.stage4 = Stage(channel_list[4], blocknum_list[3])
    self.bn1 = M.BatchNorm()
    self.fcs = nn.ModuleList()
    # Normalize the scalar case into a one-element list, then build all heads
    # with a single loop (same construction order as before).
    sizes = embedding_size if isinstance(embedding_size, list) else [embedding_size]
    for size in sizes:
        self.fcs.append(M.Dense(size, usebias=False))
def initialize(self, out):
    """Two parallel 1x1 stride-2 conv branches of out//2 channels each,
    plus a BatchNorm applied after they are combined.
    """
    half = out // 2
    self.c1 = M.ConvLayer(1, half, stride=2, usebias=False)
    self.c2 = M.ConvLayer(1, half, stride=2, usebias=False)
    self.bn = M.BatchNorm()
def initialize(self, stride, bn=False):
    """3x3 average-pool downsampling, optionally followed by BatchNorm."""
    self.bn = bn
    self.p = M.AvgPool2D(3, stride)
    # Build the norm layer only when requested, so the module stays minimal.
    if bn:
        self.batch_norm = M.BatchNorm()