def __init__(self, num_classes=1000, transform_input=False, in_channels=3):
    super(Inception3XS, self).__init__()
    self.transform_input = transform_input
    # Stem: strided/padded 3x3 convs plus a 1x1 bottleneck, expanding to 192 channels.
    self.Conv2d_1a_3x3 = BasicConv2d(in_channels, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    # Three InceptionA blocks; Mixed_5d outputs 288 channels, matching the classifier below.
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.fc = nn.Linear(288, num_classes)
    # Weight init: truncated normal, clipped at +/-2 standard deviations.
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel()))
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
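# The scipy-based loop above samples from a normal truncated at +/-2 standard
# deviations and scaled by stddev. A minimal torch-only sketch of the same
# init, assuming a PyTorch version that provides nn.init.trunc_normal_ (its
# a/b bounds are absolute cutoffs, so they are scaled by stddev here to match
# scipy's truncnorm(-2, 2, scale=stddev)); trunc_normal_init_ is an
# illustrative helper name, not part of the original code:
import torch.nn as nn

def trunc_normal_init_(module, default_stddev=0.1):
    for m in module.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = getattr(m, 'stddev', default_stddev)
            nn.init.trunc_normal_(m.weight, mean=0.0, std=stddev,
                                  a=-2 * stddev, b=2 * stddev)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)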
def __init__(self):
    super(Inception3, self).__init__()
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    # if aux_logits:
    #     self.AuxLogits = InceptionAux(768, num_classes)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            values = values.view(m.weight.data.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
def __init__(self, num_classes=NLABEL, aux_logits=False, transform_input=False):
    super(Inception3, self).__init__()
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    # Stem takes 4 input channels rather than the usual 3 (e.g. RGB plus one extra band).
    self.Conv2d_1a_3x3 = BasicConv2d(4, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    if aux_logits:
        self.AuxLogits = InceptionAux(768, num_classes)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.fc = nn.Linear(2048, num_classes)
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel()))
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
def __init__(self, config, anchors, num_cls, transform_input=False):
    nn.Module.__init__(self)
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    # aux_logits head omitted
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    # 1x1 detection head sized for the anchor/class layout.
    self.conv = nn.Conv2d(2048, model.output_channels(len(anchors), num_cls), 1)
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            # Reshape to the weight's shape before copying; a flat tensor
            # cannot be copied into a 4-d weight.
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False, in_channels=3):
    super().__init__()
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(in_channels, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    if aux_logits:
        # Auxiliary classifier over 288-channel features (stock Inception v3 attaches it at 768).
        self.AuxLogits = InceptionAux(288, num_classes)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    # Two frozen all-ones 1x1 convs that broadcast a single vessel-mask channel to 768 channels.
    self.Vessel_Preconv = nn.Conv2d(1, 768, 1, bias=False)
    self.VesselMultiplier = nn.Conv2d(in_channels=1, out_channels=768, kernel_size=1, bias=False)
    self.VesselMultiplier.weight.data.copy_(torch.ones((768, 1, 1, 1)))
    self.VesselMultiplier.weight.requires_grad = False
    self.Vessel = BasicConv3d(768, 768, kernel_size=(2, 3, 3), padding=(0, 1, 1))
    self.fc = nn.Linear(768, num_classes)
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel()))
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
    # Reset Vessel_Preconv to all ones and freeze it (the init loop above just overwrote it).
    m = self.Vessel_Preconv
    values = torch.ones(m.weight.numel())
    values = values.view(m.weight.size())
    m.weight.data.copy_(values)
    for p in m.parameters():
        p.requires_grad = False
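# Side note on the frozen all-ones 1x1 convs above: with weight
# torch.ones((768, 1, 1, 1)) and no bias, every output channel is an exact
# copy of the single input channel, so the layer is just a channel-wise
# broadcast. A small self-contained check (the tensors here are illustrative):
import torch
import torch.nn as nn

broadcast = nn.Conv2d(1, 768, kernel_size=1, bias=False)
broadcast.weight.data.copy_(torch.ones((768, 1, 1, 1)))

x = torch.randn(2, 1, 17, 17)  # e.g. a single-channel mask
assert torch.allclose(broadcast(x), x.expand(-1, 768, -1, -1))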
def __init__(self, config_channels, anchors, num_cls, transform_input=False):
    nn.Module.__init__(self)
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    # aux_logits head omitted
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.conv = nn.Conv2d(2048, model.output_channels(len(anchors), num_cls), 1)
    import scipy.stats as stats
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            values = values.view(m.weight.size())  # reshape to the weight's shape before copy_
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
    if config_channels.config.getboolean('model', 'pretrained'):
        url = _model.model_urls['inception_v3_google']
        logging.info('use pretrained model: ' + url)
        # Copy over only the pretrained entries whose keys exist in this model,
        # so the detection head keeps its fresh initialization.
        state_dict = self.state_dict()
        for key, value in torch.utils.model_zoo.load_url(url).items():
            if key in state_dict:
                state_dict[key] = value
        self.load_state_dict(state_dict)
def append_inception_a(module_list, dummy_input, config):
    # Append an InceptionA block with a randomly chosen pool_features value
    # and return the configuration that was used.
    in_channel = get_channel_count(module_list, dummy_input)
    pool_feature = random.choice(config['out_channel'])
    block = InceptionA(in_channel, pool_feature, BasicConv2d).to(dummy_input.device)
    module_list.append(block)
    return {
        'in_channels': in_channel,
        'pool_features': pool_feature,
        'conv_block': str(BasicConv2d),
    }
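# A minimal usage sketch for append_inception_a. It assumes get_channel_count
# runs the accumulated modules on dummy_input and returns the resulting channel
# count; that helper's definition is not shown above, so this is illustrative:
import random
import torch
import torch.nn as nn

modules = nn.ModuleList([nn.Conv2d(3, 192, kernel_size=3, padding=1)])
dummy_input = torch.randn(1, 3, 32, 32)
config = {'out_channel': [32, 64]}

spec = append_inception_a(modules, dummy_input, config)
print(spec['in_channels'], spec['pool_features'])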
def __init__(self, num_classes=80, aux_logits=True, transform_input=False, apply_avgpool=False):
    super(Inception3, self).__init__()
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    self.apply_avgpool = apply_avgpool
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    if aux_logits:
        self.AuxLogits = InceptionAux(768, num_classes)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
def __init__(self, pretrained, model_name, hid_size=100, dropout=0.5):
    super(DIIN, self).__init__()
    vocab_size, emb_size = pretrained.shape
    self.hid_size = hid_size
    self.dropout = dropout
    self.embedding = nn.Embedding.from_pretrained(
        torch.from_numpy(pretrained).float())
    # Note: despite the name, the encoder is an LSTM, not a GRU.
    self.gru_enc = nn.LSTM(input_size=emb_size, hidden_size=hid_size,
                           batch_first=True, bidirectional=False)
    if model_name == 'diin_my':
        self.features = nn.Sequential(
            nn.Conv2d(hid_size, 64, 1),
            nn.Conv2d(64, 128, 7, 1, 3, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.AdaptiveMaxPool2d((32, 32)),
            nn.Conv2d(128, 128, 3, 1, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.AdaptiveMaxPool2d((16, 16)),
            nn.Conv2d(128, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.AdaptiveMaxPool2d((8, 8)),
            nn.Conv2d(256, 256, 3, 1, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.AdaptiveMaxPool2d((4, 4)),
            nn.Conv2d(256, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.MaxPool2d(4),
        )
        self.fc1 = nn.Linear(512, 100)
    elif model_name == 'diin_densenet':
        # self.features = DNet(args)
        self.features = DNet(hid_size, block_config=(4, 4, 4),
                             drop_rate=0.5, num_init_features=200)
        # self.features = DNet(args, block_config=(2, 2, 2), drop_rate=0.3, num_init_features=100)
        # self.fc1 = nn.Linear(2016, 100)
        self.fc1 = nn.Linear(2466, 100)
        # self.fc1 = nn.Linear(3425, 100)
    elif model_name == 'diin_inception':
        # input: 16 * 16
        self.features = nn.Sequential(
            nn.Conv2d(hid_size, 64, 1),
            InceptionA(64, 32),
            nn.MaxPool2d(2),
            InceptionA(256, 32),
            nn.MaxPool2d(2),
            nn.Conv2d(256, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.AvgPool2d(4))
        self.fc1 = nn.Linear(512, 100)
    elif model_name == 'diin_inceptionB':
        # input: 24 * 24
        self.features = nn.Sequential(
            nn.Conv2d(hid_size, 100, 1),
            nn.BatchNorm2d(100),
            nn.ReLU(),
            InceptionA(100, 32),
            nn.MaxPool2d(2),
            InceptionA(256, 64),
            nn.MaxPool2d(2),
            InceptionA(288, 32),
            nn.MaxPool2d(2),
            nn.Conv2d(288, 512, 3, 1, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.AvgPool2d(3))
        self.fc1 = nn.Linear(512, 100)
    self.last_layer = nn.Linear(100, 1)
    self.dp = nn.Dropout(self.dropout)
    self.hn = HighWay(hid_size)
def __init__(self, use_bottleneck=True, bottleneck_dim=256, new_cls=False, class_num=1000,
             aux_logits=True, transform_input=False):
    super(Inception3Fc, self).__init__()
    model_inception = inception_v3(pretrained=True)
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    if aux_logits:
        self.AuxLogits = InceptionAux(768, class_num)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.fc = nn.Linear(2048, class_num)
    # self.avgpool = model_xception.avgpool
    self.feature_layers = nn.Sequential(
        self.Conv2d_1a_3x3, self.Conv2d_2a_3x3, self.Conv2d_2b_3x3,
        self.Conv2d_3b_1x1, self.Conv2d_4a_3x3,
        self.Mixed_5b, self.Mixed_5c, self.Mixed_5d,
        self.Mixed_6a, self.Mixed_6b, self.Mixed_6c, self.Mixed_6d, self.Mixed_6e,
        self.Mixed_7a, self.Mixed_7b, self.Mixed_7c,
    )
    ####################
    self.use_bottleneck = use_bottleneck
    self.new_cls = new_cls
    # print("classes inside network", new_cls)
    if new_cls:
        if self.use_bottleneck:
            print(bottleneck_dim)
            # Bottleneck head: features -> bottleneck_dim -> class_num
            # (replaces the 2048 -> class_num fc created above).
            self.bottleneck = nn.Linear(model_inception.fc.in_features, bottleneck_dim)
            self.fc = nn.Linear(bottleneck_dim, class_num)
            self.bottleneck.apply(init_weights)
            self.fc.apply(init_weights)
            self.__in_features = bottleneck_dim
        else:
            self.fc = nn.Linear(model_inception.fc.in_features, class_num)
            self.fc.apply(init_weights)
            self.__in_features = model_inception.fc.in_features
    else:
        self.fc = model_inception.fc
        self.__in_features = model_inception.fc.in_features
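# init_weights is referenced above but not defined in this excerpt. A plausible
# sketch of such a helper for use with nn.Module.apply; this exact definition
# is an assumption, not the original:
import torch.nn as nn

def init_weights(m):
    # Xavier-initialize linear layers and zero their biases.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)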