def DenseNet40(self, inputs=None, is_train=True, reload_w=None, num_classes=None):
    """Build a DenseNet-40-style classifier on top of a Pruner model.

    Args:
        inputs: input tensor fed to the stem convolution.
        is_train: training-mode flag forwarded to BN-bearing layers.
        reload_w: optional weight file forwarded to the Pruner constructor.
        num_classes: number of units in the final FC layer.

    Returns:
        Tuple ``(logits, model)`` — the classifier output and the Pruner
        instance that owns the layers.
    """
    model = Pruner(reload_file=reload_w)
    # Stem: plain 3x3 conv with no BN/activation (the dense blocks apply
    # their own pre-activation normalization).
    net = model._add_layer(inputs, mode="conv", out_c=32, k_size=3,
                           strides=1, with_bn=False, act=None)
    # Two (dense block -> transition) pairs; each transition downsamples
    # (spatial size 16 after the first, 8 after the second), then one
    # final dense block without a transition.
    for _ in range(2):
        net = self.densenet_block(model, net, is_train=is_train)
        net = self.densenet_trans(model, net, is_train=is_train)
    net = self.densenet_block(model, net, is_train=is_train)
    # Head: BN + activation, global average pooling, linear classifier.
    net = model.bn_act_layer(net, is_train=is_train)
    net = model.gap_layer(net)
    net = model._add_layer(net, mode="fc", out_c=num_classes, act=None, with_bn=False)
    return net, model
def simpleNet(self, inputs=None, is_train=True, reload_w=None, num_classes=None):
    """Build a small plain CNN with a single two-branch additive merge.

    Args:
        inputs: input tensor fed to the first convolution.
        is_train: training-mode flag forwarded to every conv layer.
        reload_w: optional weight file forwarded to the Pruner constructor.
        num_classes: number of units in the final FC layer.

    Returns:
        Tuple ``(logits, model)``.
    """
    model = Pruner(reload_file=reload_w)
    kernel = 5

    def conv(feat, channels, stride):
        # All convolutions share the same kernel size and training flag.
        return model._add_layer(feat, mode='conv', out_c=channels,
                                k_size=kernel, strides=stride, is_train=is_train)

    net = conv(inputs, 32, 1)
    net = conv(net, 64, 2)
    # Two parallel 64-channel branches merged by element-wise addition.
    left = conv(net, 64, 1)
    right = conv(net, 64, 1)
    net = model.Add_layer(left, right)
    # Tail: widen the network while downsampling twice more.
    for channels, stride in ((64, 1), (96, 2), (96, 1), (128, 2)):
        net = conv(net, channels, stride)
    net = model.gap_layer(net)
    net = model._add_layer(net, mode="fc", out_c=num_classes, with_bn=False, act=None)
    return net, model
def MobileNetV1(self, inputs=None, is_train=True, reload_w=None, num_classes=None):
    """Build a MobileNetV1-style classifier on top of a Pruner model.

    Args:
        inputs: input tensor fed to the stem convolution.
        is_train: training-mode flag forwarded to the BN-bearing depthwise
            layers. Defaults to True for consistency with the sibling
            builders (it previously defaulted to None, which flowed unset
            into layers built with ``with_bn=True``).
        reload_w: optional weight file forwarded to the Pruner constructor.
        num_classes: number of units in the final FC layer.

    Returns:
        Tuple ``(logits, model)``.
    """
    model = Pruner(reload_file=reload_w)
    # Stem conv. NOTE(review): unlike the dconv layers below, this call does
    # not pass with_bn/is_train — confirm _add_layer's defaults are the
    # intended behavior here.
    x = model._add_layer(inputs, mode="conv", out_c=32, k_size=3, strides=1)
    # Depthwise-separable stack as (out_channels, stride) pairs; stride-2
    # entries downsample. The five trailing (512, 1) layers form the
    # classic MobileNetV1 body.
    dconv_cfg = [(64, 1), (128, 2), (128, 1), (256, 2), (256, 1), (512, 2)]
    dconv_cfg += [(512, 1)] * 5
    for out_c, strides in dconv_cfg:
        x = model._add_layer(x, mode="dconv", out_c=out_c, k_size=3,
                             strides=strides, with_bn=True, is_train=is_train)
    x = model.gap_layer(x)
    # NOTE(review): the sibling builders pass with_bn=False for the FC head;
    # this one relies on _add_layer's default — confirm that is intentional.
    x = model._add_layer(x, mode="fc", out_c=num_classes, act=None)
    return x, model
def ResNet18(self, inputs=None, is_train=True, reload_w=None, num_classes=None):
    """Build a ResNet-18 on top of a Pruner model using v1 or v2 blocks.

    Args:
        inputs: input tensor fed to the stem convolution.
        is_train: training-mode flag forwarded to BN-bearing layers.
        reload_w: optional weight file forwarded to the Pruner constructor.
        num_classes: number of units in the final FC layer.

    Returns:
        Tuple ``(logits, model)``.
    """
    model = Pruner(reload_file=reload_w)
    blocks_per_stage = self.model_config["resnet18"]
    use_v1 = self.block_version == 1
    block_func = self.__resnet_block_v1 if use_v1 else self.__resnet_block_v2
    # v1 stems carry their own BN/activation; v2 defers normalization to
    # the pre-activation blocks.
    if use_v1:
        x = model._add_layer(inputs, mode="conv", out_c=self.init_channels,
                             k_size=3, strides=1, with_bn=True, is_train=is_train)
    else:
        x = model._add_layer(inputs, mode="conv", out_c=self.init_channels,
                             k_size=3, strides=1, with_bn=False, act=None)
    # Four stages (output sizes 32/16/8/4 in the original layout): channel
    # width doubles each stage, and the first block of every stage after
    # the first downsamples with stride 2.
    for stage, num_blocks in enumerate(blocks_per_stage):
        width = self.init_channels * (2 ** stage)
        for idx in range(num_blocks):
            stride = 2 if (stage > 0 and idx == 0) else 1
            x = block_func(model, inputs=x, out_c=width, strides=stride,
                           is_train=is_train)
    # v2 needs a final BN + activation before pooling.
    if self.block_version == 2:
        x = model.bn_act_layer(x, is_train=is_train)
    x = model.gap_layer(x)
    x = model._add_layer(x, mode="fc", out_c=num_classes, act=None, with_bn=False)
    return x, model