def __init__(self, builder: ConvBuilder, deps=None):
    super(LeNet5, self).__init__()
    self.bd = builder
    if deps is None:
        deps = LENET5_DEPS
    stem = builder.Sequential()
    stem.add_module('conv1', builder.Conv2d(in_channels=1, out_channels=deps[0],
                                            kernel_size=5, bias=True))
    stem.add_module('maxpool1', builder.Maxpool2d(kernel_size=2))
    stem.add_module('conv2', builder.Conv2d(in_channels=deps[0], out_channels=deps[1],
                                            kernel_size=5, bias=True))
    stem.add_module('maxpool2', builder.Maxpool2d(kernel_size=2))
    self.stem = stem
    self.flatten = builder.Flatten()
    # two valid 5x5 convs + two 2x2 pools leave a 4x4 map, hence deps[1] * 16 features
    self.linear1 = builder.Linear(in_features=deps[1] * 16, out_features=deps[2])
    self.relu1 = builder.ReLU()
    self.linear2 = builder.Linear(in_features=deps[2], out_features=10)
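# Shape sanity check (a standalone sketch, assuming 1x28x28 MNIST inputs and that the
# builder's Conv2d / Maxpool2d follow the usual nn.Conv2d / nn.MaxPool2d semantics):
# conv1 (k=5, no padding) 28 -> 24, maxpool1 24 -> 12, conv2 12 -> 8, maxpool2 8 -> 4,
# so the flattened feature size is deps[1] * 4 * 4 = deps[1] * 16, matching linear1.
_side = 28
for _k in (5, 5):                       # two (valid conv, 2x2 max-pool) stages
    _side = (_side - _k + 1) // 2
assert _side == 4                       # 4 * 4 = 16 spatial positions per channel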
def __init__(self, builder: ConvBuilder):
    super(LeNet300, self).__init__()
    self.flatten = builder.Flatten()
    self.linear1 = builder.Linear(in_features=28 * 28, out_features=300, bias=True)
    self.relu1 = builder.ReLU()
    self.linear2 = builder.Linear(in_features=300, out_features=100, bias=True)
    self.relu2 = builder.ReLU()
    self.linear3 = builder.Linear(in_features=100, out_features=10, bias=True)
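# Parameter count (a standalone check, assuming builder.Linear builds a standard
# fully connected layer): 784*300 + 300 + 300*100 + 100 + 100*10 + 10 = 266,610,
# i.e. the classic LeNet-300-100 MLP for flattened 28x28 MNIST inputs.
_lenet300_params = (28 * 28) * 300 + 300 + 300 * 100 + 100 + 100 * 10 + 10
assert _lenet300_params == 266610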
def __init__(self, num_classes, builder: ConvBuilder, deps):
    super(VCNet, self).__init__()
    self.stem = _create_vgg_stem(builder=builder, deps=deps)
    self.flatten = builder.Flatten()
    self.linear1 = builder.IntermediateLinear(in_features=deps[12], out_features=512)
    self.relu = builder.ReLU()
    self.linear2 = builder.Linear(in_features=512, out_features=num_classes)
def __init__(self, builder: ConvBuilder, num_blocks, num_classes=1000, deps=None):
    super(SBottleneckResNet, self).__init__()
    if deps is None:
        if num_blocks == [3, 4, 6, 3]:
            deps = RESNET50_ORIGIN_DEPS_FLATTENED
        elif num_blocks == [3, 4, 23, 3]:
            deps = resnet_bottleneck_origin_deps_flattened(101)
        else:
            raise ValueError('no default deps for num_blocks={}'.format(num_blocks))
    self.conv1 = builder.Conv2dBNReLU(3, deps[0], kernel_size=7, stride=2, padding=3)
    self.maxpool = builder.Maxpool2d(kernel_size=3, stride=2, padding=1)
    # every stage has num_blocks * 3 bottleneck convs plus 1 projection conv
    nls = [n * 3 + 1 for n in num_blocks]   # num layers in each stage
    self.stage1 = ResNetBottleneckStage(builder=builder, in_planes=deps[0],
                                        stage_deps=deps[1:nls[0] + 1])
    self.stage2 = ResNetBottleneckStage(builder=builder, in_planes=deps[nls[0]],
                                        stage_deps=deps[nls[0] + 1:nls[0] + 1 + nls[1]],
                                        stride=2)
    self.stage3 = ResNetBottleneckStage(builder=builder, in_planes=deps[nls[0] + nls[1]],
                                        stage_deps=deps[nls[0] + nls[1] + 1:nls[0] + 1 + nls[1] + nls[2]],
                                        stride=2)
    self.stage4 = ResNetBottleneckStage(builder=builder, in_planes=deps[nls[0] + nls[1] + nls[2]],
                                        stage_deps=deps[nls[0] + nls[1] + nls[2] + 1:nls[0] + 1 + nls[1] + nls[2] + nls[3]],
                                        stride=2)
    self.gap = builder.GAP(kernel_size=7)
    self.fc = builder.Linear(deps[-1], num_classes)
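# Worked index example (assuming num_blocks == [3, 4, 6, 3], i.e. ResNet-50): each
# stage contributes 3 convs per bottleneck block plus 1 projection conv, so
# nls == [10, 13, 19, 10] and len(deps) == 1 + sum(nls) == 53. The tuples below are
# a standalone restatement of the (start, stop) slices used above, independent of
# the builder and of the actual channel values.
_nls = [n * 3 + 1 for n in [3, 4, 6, 3]]
assert _nls == [10, 13, 19, 10] and 1 + sum(_nls) == 53
_stage_slices = [
    (1, _nls[0] + 1),                                           # stage1: deps[1:11]
    (_nls[0] + 1, _nls[0] + 1 + _nls[1]),                       # stage2: deps[11:24]
    (_nls[0] + _nls[1] + 1, _nls[0] + _nls[1] + _nls[2] + 1),   # stage3: deps[24:43]
    (_nls[0] + _nls[1] + _nls[2] + 1, 1 + sum(_nls)),           # stage4: deps[43:53]
]
assert _stage_slices == [(1, 11), (11, 24), (24, 43), (43, 53)]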
def __init__(self, builder: ConvBuilder, num_classes):
    super(MobileV1CifarNet, self).__init__()
    self.conv1 = builder.Conv2dBNReLU(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)
    blocks = []
    in_planes = cifar_cfg[0]   # expected to match conv1's out_channels (16)
    for x in cifar_cfg:
        # each cfg entry is either an int (stride-1 block) or an (out_planes, stride) tuple
        out_planes = x if isinstance(x, int) else x[0]
        stride = 1 if isinstance(x, int) else x[1]
        blocks.append(MobileV1Block(builder=builder, in_planes=in_planes,
                                    out_planes=out_planes, stride=stride))
        in_planes = out_planes
    self.stem = builder.Sequential(*blocks)
    self.gap = builder.GAP(kernel_size=8)
    self.linear = builder.Linear(in_planes, num_classes)   # in_planes is the last block's output width
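# Config-format note (an illustration of the parsing loop above, not the real cfg):
# each cifar_cfg entry is either an int (out_planes with stride 1) or an
# (out_planes, stride) tuple; the values below are hypothetical.
_example_cfg = [16, (32, 2), 32]
_parsed = [(x, 1) if isinstance(x, int) else x for x in _example_cfg]
assert _parsed == [(16, 1), (32, 2), (32, 1)]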
def __init__(self, builder: ConvBuilder, deps):
    super(LeNet5BN, self).__init__()
    self.bd = builder
    stem = builder.Sequential()
    stem.add_module('conv1', builder.Conv2dBNReLU(in_channels=1, out_channels=deps[0], kernel_size=5))
    stem.add_module('maxpool1', builder.Maxpool2d(kernel_size=2))
    stem.add_module('conv2', builder.Conv2dBNReLU(in_channels=deps[0], out_channels=deps[1], kernel_size=5))
    stem.add_module('maxpool2', builder.Maxpool2d(kernel_size=2))
    self.stem = stem
    self.flatten = builder.Flatten()
    self.linear1 = builder.IntermediateLinear(in_features=deps[1] * 16, out_features=500)
    self.relu1 = builder.ReLU()
    self.linear2 = builder.Linear(in_features=500, out_features=10)
def __init__(self, conv_idx, builder: ConvBuilder, preced_layer_idx, in_features, out_features, bias=True):
    super(AOFPFCReluLayer, self).__init__()
    self.conv_idx = conv_idx
    self.base_path = builder.Linear(in_features=in_features, out_features=out_features, bias=bias)
    self.relu = builder.ReLU()
    self.register_buffer('t_value', torch.zeros(1))
    self.preced_layer_idx = preced_layer_idx
def __init__(self, block_counts, num_classes, builder: ConvBuilder, deps, use_dropout):
    super(WRNCifarNet, self).__init__()
    self.bd = builder
    converted_deps = wrn_convert_flattened_deps(deps)
    print('the converted deps is ', converted_deps)
    self.conv1 = builder.Conv2d(in_channels=3, out_channels=converted_deps[0], kernel_size=3,
                                stride=1, padding=1, bias=False)
    self.stage1 = self._build_wrn_stage(num_blocks=block_counts[0], stage_input_channels=converted_deps[0],
                                        stage_deps=converted_deps[1], downsample=False, use_dropout=use_dropout)
    self.stage2 = self._build_wrn_stage(num_blocks=block_counts[1], stage_input_channels=converted_deps[1][-1][1],
                                        stage_deps=converted_deps[2], downsample=True, use_dropout=use_dropout)
    self.stage3 = self._build_wrn_stage(num_blocks=block_counts[2], stage_input_channels=converted_deps[2][-1][1],
                                        stage_deps=converted_deps[3], downsample=True, use_dropout=use_dropout)
    self.last_bn = builder.BatchNorm2d(num_features=converted_deps[3][-1][1])
    self.linear = builder.Linear(in_features=converted_deps[3][-1][1], out_features=num_classes)
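# Indexing note (an assumption inferred from the usage above, not a definition of
# wrn_convert_flattened_deps): converted_deps appears to be
# [first_conv_out, stage1, stage2, stage3], where each stage is a list of per-block
# (conv1_out, conv2_out) pairs, so converted_deps[i][-1][1] is the output width of the
# last block of stage i. Illustrative WRN-28-10-like widths (hypothetical values):
_example_converted = [16, [(160, 160)] * 4, [(320, 320)] * 4, [(640, 640)] * 4]
assert _example_converted[2][-1][1] == 320 and _example_converted[3][-1][1] == 640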
def __init__(self, builder: ConvBuilder, deps=SIMPLE_ALEXNET_DEPS):
    super(AlexBN, self).__init__()
    stem = builder.Sequential()
    stem.add_module('conv1', builder.Conv2dBNReLU(in_channels=3, out_channels=deps[0],
                                                  kernel_size=11, stride=4, padding=2))
    stem.add_module('maxpool1', builder.Maxpool2d(kernel_size=3, stride=2))
    stem.add_module('conv2', builder.Conv2dBNReLU(in_channels=deps[0], out_channels=deps[1],
                                                  kernel_size=5, padding=2))
    stem.add_module('maxpool2', builder.Maxpool2d(kernel_size=3, stride=2))
    stem.add_module('conv3', builder.Conv2dBNReLU(in_channels=deps[1], out_channels=deps[2],
                                                  kernel_size=3, padding=1))
    stem.add_module('conv4', builder.Conv2dBNReLU(in_channels=deps[2], out_channels=deps[3],
                                                  kernel_size=3, padding=1))
    stem.add_module('conv5', builder.Conv2dBNReLU(in_channels=deps[3], out_channels=deps[4],
                                                  kernel_size=3, padding=1))
    stem.add_module('maxpool3', builder.Maxpool2d(kernel_size=3, stride=2))
    self.stem = stem
    self.flatten = builder.Flatten()
    self.linear1 = builder.Linear(in_features=deps[4] * 6 * 6, out_features=4096)
    self.relu1 = builder.ReLU()
    self.drop1 = builder.Dropout(0.5)
    self.linear2 = builder.Linear(in_features=4096, out_features=4096)
    self.relu2 = builder.ReLU()
    self.drop2 = builder.Dropout(0.5)
    self.linear3 = builder.Linear(in_features=4096, out_features=1000)
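# Spatial-size check (a standalone sketch, assuming 3x224x224 inputs and standard
# floor-division conv/pool arithmetic behind the builder):
# conv1 (k=11, s=4, p=2) 224 -> 55, maxpool1 (k=3, s=2) 55 -> 27,
# conv2 (k=5, p=2) 27 -> 27, maxpool2 27 -> 13, conv3/4/5 (k=3, p=1) keep 13,
# maxpool3 13 -> 6, hence linear1 takes in_features = deps[4] * 6 * 6.
def _out_size(size, k, s=1, p=0):
    return (size + 2 * p - k) // s + 1
_s = _out_size(224, 11, s=4, p=2)               # 55 after conv1
_s = _out_size(_s, 3, s=2)                      # 27 after maxpool1
_s = _out_size(_out_size(_s, 5, p=2), 3, s=2)   # conv2 keeps 27, maxpool2 -> 13
_s = _out_size(_s, 3, s=2)                      # conv3/4/5 keep 13, maxpool3 -> 6
assert _s == 6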
def __init__(self, builder: ConvBuilder, num_classes, deps=None):
    super(MobileV1ImagenetNet, self).__init__()
    if deps is None:
        deps = MI1_ORIGIN_DEPS
    assert len(deps) == 27   # 1 entry for conv1 + (depthwise, pointwise) widths for 13 blocks
    self.conv1 = builder.Conv2dBNReLU(in_channels=3, out_channels=deps[0], kernel_size=3, stride=2, padding=1)
    blocks = []
    for block_idx in range(13):
        depthwise_channels = int(deps[block_idx * 2 + 1])
        pointwise_channels = int(deps[block_idx * 2 + 2])
        stride = 2 if block_idx in [1, 3, 5, 11] else 1   # downsampling blocks
        blocks.append(MobileV1Block(builder=builder, in_planes=depthwise_channels,
                                    out_planes=pointwise_channels, stride=stride))
    self.stem = builder.Sequential(*blocks)
    self.gap = builder.GAP(kernel_size=7)
    self.linear = builder.Linear(deps[-1], num_classes)
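# deps layout check (standalone): one entry for conv1 plus (depthwise, pointwise)
# widths for each of the 13 blocks explains the assert len(deps) == 27 above. With
# the stride-2 conv1 and the four stride-2 blocks, a 224x224 input is reduced
# 224 -> 112 -> 56 -> 28 -> 14 -> 7, matching builder.GAP(kernel_size=7).
_dw_idx = [2 * b + 1 for b in range(13)]   # depthwise widths at deps[1], deps[3], ..., deps[25]
_pw_idx = [2 * b + 2 for b in range(13)]   # pointwise widths at deps[2], deps[4], ..., deps[26]
assert _dw_idx[0] == 1 and _pw_idx[-1] == 26 and 1 + 2 * 13 == 27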
def __init__(self, num_classes, builder: ConvBuilder, deps):
    super(VANet, self).__init__()
    sq = builder.Sequential()
    # VGG-16-style stem: 13 3x3 Conv-BN-ReLU layers with a 2x2 max pool after
    # conv2, conv4, conv7, conv10 and conv13 (module names conv1..conv13, maxpool1..maxpool5)
    in_channels = 3
    pool_after = (2, 4, 7, 10, 13)
    num_pools = 0
    for i in range(13):
        sq.add_module('conv{}'.format(i + 1),
                      builder.Conv2dBNReLU(in_channels=in_channels, out_channels=deps[i],
                                           kernel_size=3, stride=1, padding=1))
        in_channels = deps[i]
        if i + 1 in pool_after:
            num_pools += 1
            sq.add_module('maxpool{}'.format(num_pools), builder.Maxpool2d(kernel_size=2))
    self.stem = sq
    self.flatten = builder.Flatten()
    self.linear1 = builder.IntermediateLinear(in_features=deps[12], out_features=512)
    self.relu = builder.ReLU()
    self.linear2 = builder.Linear(in_features=512, out_features=num_classes)
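# Flatten-size check: with 3x32x32 CIFAR inputs, the five 2x2 max pools reduce
# 32 -> 16 -> 8 -> 4 -> 2 -> 1, so the flattened tensor has exactly deps[12] features
# (one 1x1 spatial position per channel of conv13), which is why linear1 takes
# in_features=deps[12] with no spatial factor.
_side = 32
for _ in range(5):
    _side //= 2
assert _side == 1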