def vis_shap(dataset_name, attack_state, loader, img):
    import shap
    # img is an HWC numpy array: keep an NHWC copy for plotting and an NCHW copy for the model
    raw_img = img[np.newaxis, ...]                          # (1, H, W, C) for shap.image_plot
    img2 = np.transpose(img, (2, 0, 1))[np.newaxis, ...]    # (1, C, H, W) for the network
    for data, label in loader:
        x_train = data  # keep the last batch as the pool of background samples
    gpu_model = torch.load(dataset_name + str(attack_state) + 'prenet.ckpt')

    if dataset_name == 'C10':
        model = resnet.resnet18(indim=3)
    elif dataset_name == 'MNIST':
        model = resnet.resnet18(indim=1)
    model.load_state_dict(gpu_model.state_dict())
    model.eval()
    x_train = x_train.numpy()
    background = x_train[np.random.choice(x_train.shape[0], 100, replace=False)]
    background = torch.Tensor(background)
    img2 = torch.Tensor(img2)
    e = shap.DeepExplainer(model, background)
    # ...or pass tensors directly
    # e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
    shap_values = e.shap_values(img2)
    # transpose each per-class attribution map from NCHW to NHWC for plotting
    shap_results = [np.transpose(kk, (0, 2, 3, 1)) for kk in shap_values]
    # plot the feature attributions; show=False so the figure can be saved to disk
    shap.image_plot(shap_results, raw_img, show=False)
    plt.savefig(ROOT + dataset_name + 'shap' + str(attack_state) + '.jpg')
    plt.close()
def vis_lrp(dataset_name, attack_state, img, label):
    from attribution_methods.innvestigator import InnvestigateModel
    img = preprocess_transform(img)
    img = img.view(1, img.size(0), img.size(1), img.size(2))
    print(img.size())
    gpu_model = torch.load(dataset_name + str(attack_state) + 'prenet.ckpt')
    #print(model)
    if dataset_name == 'C10':
        model = resnet.resnet18(indim=3)
    elif dataset_name == 'MNIST':
        model = resnet.resnet18(indim=1)
    model.load_state_dict(gpu_model.state_dict())
    model.eval()
    model = torch.nn.Sequential(model, torch.nn.Softmax(dim=1))
    inn_model = InnvestigateModel(model, lrp_exponent=1,
                                  method="b-rule",
                                  beta=0, epsilon=1e-6).cuda()

    def run_LRP(net, image_tensor):
        return inn_model.innvestigate(in_tensor=image_tensor, rel_for_class=1)

    # the wrapped model lives on the GPU, so the input tensor must be moved there too
    AD_score, LRP_map = run_LRP(inn_model, img.cuda())
    #AD_score = AD_score[0][1].detach().cpu().numpy()
    LRP_map = LRP_map.detach().cpu().numpy().squeeze()
    mask = LRP_map
    print(mask.shape)
    raw_img = img.numpy()
    show_cam(raw_img, mask, 'lrp' + str(attack_state) + '.jpg')
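A minimal, hypothetical driver for the two attribution helpers above. Random tensors stand in for real CIFAR-10 data, and the 'C10<attack_state>prenet.ckpt' checkpoint is assumed to already exist; only the two call signatures come from the functions themselves.

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical stand-in data: a single 128-image batch so vis_shap can draw
# 100 background samples from the last (and only) batch it sees.
dummy_imgs = torch.rand(128, 3, 32, 32)
dummy_labels = torch.randint(0, 10, (128,))
test_loader = DataLoader(TensorDataset(dummy_imgs, dummy_labels), batch_size=128)

sample_img = dummy_imgs[0].permute(1, 2, 0).numpy()   # HWC numpy array, as vis_shap expects
vis_shap('C10', attack_state=0, loader=test_loader, img=sample_img)
vis_lrp('C10', attack_state=0, img=sample_img, label=int(dummy_labels[0]))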
Example #3
    def __init__(self):
        super().__init__()

        self.encoder = resnet18(pretrained=True)
        # self.encoder = resnet34(pretrained=True)
        self.decoder_embed = decoder_fcn.Decoder_LaneNet_TConv_Embed()
        self.decoder_logit = decoder_fcn.Decoder_LaneNet_TConv_Logit()
    def __init__(self,
                 embedding_size,
                 num_classes,
                 backbone='resnet18',
                 mode='t'):
        super(background_resnet, self).__init__()
        self.trainMode = mode
        self.backbone = backbone
        # copying modules from pretrained models
        if backbone == 'resnet50':
            self.pretrained = resnet.resnet50(pretrained=False)
        elif backbone == 'resnet101':
            self.pretrained = resnet.resnet101(pretrained=False)
        elif backbone == 'resnet152':
            self.pretrained = resnet.resnet152(pretrained=False)
        elif backbone == 'resnet18':
            self.pretrained = resnet.resnet18(pretrained=False)
        elif backbone == 'resnet34':
            self.pretrained = resnet.resnet34(pretrained=False)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))

        self.fc0 = nn.Linear(128, embedding_size[0])

        # task specific layers for task 1
        self.fc1 = nn.Linear(128, embedding_size[1])
        self.bn1 = nn.BatchNorm1d(embedding_size[1])
        self.relu1 = nn.ReLU()
        self.last1 = nn.Linear(embedding_size[1], num_classes)

        # task specific layers for task 2
        self.fc2 = nn.Linear(128, embedding_size[2])
        self.bn2 = nn.BatchNorm1d(embedding_size[2])
        self.relu2 = nn.ReLU()
        self.last2 = nn.Linear(embedding_size[2], num_classes)
    def __init__(self, layers=18, classes=2, with_sp=True):
        super(BiseNet, self).__init__()
        self.with_sp = with_sp

        if layers == 18:
            resnet = models.resnet18(pretrained=True,
                                     deep_base=False,
                                     strides=(1, 2, 2, 2),
                                     dilations=(1, 1, 1, 1))
        elif layers == 34:
            resnet = models.resnet34(pretrained=True,
                                     deep_base=False,
                                     strides=(1, 2, 2, 2),
                                     dilations=(1, 1, 1, 1))

        if self.with_sp:
            self.sp = SpatialPath(in_channels=3, out_channels=128)
        self.cp = ContextPath(in_channels=3, out_channels=128, backbone=resnet)
        self.ffm = FeatureFusionModule(in_channels=256,
                                       out_channels=256)  # concat: 128+128
        self.conv_out = BiseNetHead(in_channels=256,
                                    mid_channels=256,
                                    classes=classes)

        if self.training:
            self.conv_out16 = BiseNetHead(in_channels=128,
                                          mid_channels=64,
                                          classes=classes)
            self.conv_out32 = BiseNetHead(in_channels=128,
                                          mid_channels=64,
                                          classes=classes)
    def __init__(self, 
                 layers,
                 in_channels=192,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 pretrained=False,
                 deep_base=False) -> None:
        super(ResNetDCT_345, self).__init__()
        if layers == 18:
            resnet = resnet18(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 34:
            resnet = resnet34(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 50:
            resnet = resnet50(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 101:
            resnet = resnet101(pretrained, deep_base, strides=strides, dilations=dilations)
        self.layer2, self.layer3, self.layer4, self.avgpool, self.fc = \
            resnet.layer2, resnet.layer3, resnet.layer4, resnet.avgpool, resnet.fc
        self.relu = nn.ReLU(inplace=True)

        out_ch = self.layer2[0].conv1.out_channels
        ks = self.layer2[0].conv1.kernel_size
        stride = self.layer2[0].conv1.stride
        padding =  self.layer2[0].conv1.padding
        self.layer2[0].conv1 = nn.Conv2d(in_channels, out_ch, kernel_size=ks, stride=stride, padding=padding, bias=False)
        init_weight(self.layer2[0].conv1)
        
        out_ch = self.layer2[0].downsample[0].out_channels
        self.layer2[0].downsample[0] = nn.Conv2d(in_channels, out_ch, kernel_size=1, stride=2, bias=False)
        init_weight(self.layer2[0].downsample[0])
Example #7
def load_model(wts_name, n_classes):
    weights_path = './weights/' + wts_name
    model = resnet.resnet18(pretrained=False)
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, n_classes)
    model.load_state_dict(torch.load(weights_path))
    return model
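A short usage sketch for load_model; 'weights_example.pth' is a placeholder file name (any state_dict saved for this modified resnet18 head would do):

import torch

# Hypothetical usage: the weight file name below is a placeholder.
model = load_model('weights_example.pth', n_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.rand(1, 3, 224, 224))   # one dummy RGB image
    pred = logits.argmax(dim=1)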
Example #8
def genarate_model(opt):
    if opt.model == 'inceptionv4':
        model = Inceptionv4(classes=opt.fb_cls)
    elif opt.model == 'resnet18':
        model = resnet18(num_classes=opt.fb_cls)
    else:
        raise ValueError('unknown model: {}'.format(opt.model))
    return model
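The function only reads opt.model and opt.fb_cls, so an argparse-style namespace is enough to drive it; a minimal sketch with hypothetical values:

from argparse import Namespace

# Hypothetical options object; only the .model and .fb_cls fields are used above.
opt = Namespace(model='resnet18', fb_cls=1000)
model = genarate_model(opt)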
Example #9
    def __init__(self,
                 model,
                 modality='rgb',
                 inp=3,
                 num_classes=150,
                 input_size=224,
                 input_segments=8,
                 dropout=0.5):
        super(tsn_model, self).__init__()

        if modality == 'flow':
            inp = 10

        self.num_classes = num_classes
        self.inp = inp
        self.input_segments = input_segments
        self._enable_pbn = False
        if model == 'resnet18':
            self.model = resnet.resnet18(inp=inp, pretrained=True)
        elif model == 'resnet34':
            self.model = resnet.resnet34(inp=inp, pretrained=True)
        elif model == 'resnet50':
            self.model = resnet.resnet50(inp=inp, pretrained=True)
        elif model == 'resnet101':
            self.model = resnet.resnet101(inp=inp, pretrained=True)
        elif model == 'bn_inception':
            self.model = bn_inception.bninception(inp=inp)

        self.modality = modality
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=dropout)
        in_channels = self.model.fc.in_features
        self.model.fc = None
        self.fc = nn.Linear(in_channels, num_classes)
        self.consensus = basic_ops.ConsensusModule('avg')
Example #10
    def __init__(self, layers=18, bins=(1, 2, 3, 6), dropout=0.1, classes=2, zoom_factor=8, use_ppm=True,
                 criterion=nn.CrossEntropyLoss(ignore_index=255), BatchNorm=nn.BatchNorm2d, flow=False, sd=False,
                 pretrained=True):
        super(PSPNet, self).__init__()
        assert layers in [18, 50, 101, 152]
        assert 512 % len(bins) == 0
        assert classes > 1
        assert zoom_factor in [1, 2, 4, 8]
        self.zoom_factor = zoom_factor
        self.use_ppm = use_ppm
        self.flow = flow
        self.sd = sd
        self.criterion = criterion
        models.BatchNorm = BatchNorm

        if layers == 50:
            resnet = models.resnet50(pretrained=pretrained)
        elif layers == 18:
            resnet = models.resnet18(deep_base=False, pretrained=pretrained)
        elif layers == 101:
            resnet = models.resnet101(pretrained=pretrained)
        else:
            resnet = models.resnet152(pretrained=pretrained)
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        for n, m in self.layer3.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (2, 2), (2, 2), (1, 1)
            elif 'conv1' in n:
                m.stride = (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)
        for n, m in self.layer4.named_modules():
            if 'conv2' in n:
                m.dilation, m.padding, m.stride = (4, 4), (4, 4), (1, 1)
            elif 'conv1' in n:
                m.stride = (1, 1)
            elif 'downsample.0' in n:
                m.stride = (1, 1)

        fea_dim = 512
        if use_ppm:
            self.ppm = PPM(fea_dim, int(fea_dim / len(bins)), bins, BatchNorm)
            fea_dim *= 2
        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, 256, kernel_size=3, padding=1, bias=False),
            BatchNorm(256),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(256, classes, kernel_size=1)
        )
        if self.training:
            self.aux = nn.Sequential(
                nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
                BatchNorm(256),
                nn.ReLU(inplace=True),
                nn.Dropout2d(p=dropout),
                nn.Conv2d(256, classes, kernel_size=1)
            )
    def __init__(self, layers=18, dropout=0.1, classes=2):
        super(FFTNet23, self).__init__()
        if layers == 18:
            resnet = models.resnet18(pretrained=True,
                                     deep_base=False,
                                     strides=(1, 2, 2, 2),
                                     dilations=(1, 1, 1, 1))
        elif layers == 34:
            resnet = models.resnet34(pretrained=True,
                                     deep_base=False,
                                     strides=(1, 2, 2, 2),
                                     dilations=(1, 1, 1, 1))
        elif layers == 50:
            resnet = models.resnet50_semseg(pretrained=True,
                                            deep_base=True,
                                            strides=(1, 2, 1, 1),
                                            dilations=(1, 1, 2, 4))

        if layers == 18 or layers == 34:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                        resnet.maxpool)
        else:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                        resnet.conv2, resnet.bn2, resnet.relu,
                                        resnet.conv3, resnet.bn3, resnet.relu,
                                        resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4

        if layers == 18 or layers == 34:
            fea_dim = 512
            aux_dim = 256
        else:
            fea_dim = 2048
            aux_dim = 1024

        self.freq = nn.ModuleList()
        for i in range(6, 10):  # the number of in_channels is 2^i
            self.freq.append(
                FeatureFrequencySeparationModule(
                    in_channels=2**i,
                    up_channels=2**i if i == 6 else 2**(i - 1),
                    smf_channels=128,
                    high_ratio=1 - 0.2 * (i - 5),
                    # high_ratio=0.5,
                    low_ratio=0.2,
                    up_flag=False if i == 6 else True,
                    smf_flag=True if i % 2 == 0 else False,
                ))
        self.fa_cls_seg = nn.Sequential(nn.Dropout2d(p=dropout),
                                        nn.Conv2d(256, classes, kernel_size=1))
        if self.training:
            self.aux = nn.Sequential(
                nn.Conv2d(aux_dim,
                          aux_dim // 4,
                          kernel_size=3,
                          padding=1,
                          bias=False), nn.BatchNorm2d(aux_dim // 4),
                nn.ReLU(inplace=True), nn.Dropout2d(p=dropout),
                nn.Conv2d(aux_dim // 4, classes, kernel_size=1))
Example #12
    def __init__(self):
        super().__init__()

        # comment or uncomment to choose from different encoders and decoders
        self.encoder = resnet18(pretrained=True)
        # self.encoder = resnet34(pretrained=True)
 
        self.decoder = decoder_fcn.Decoder_LaneNet_TConv()  # Decoder with Transposed Conv
Example #13
    def __init__(self, 
                 layers=50, 
                 dropout=0.1, 
                 classes=2,
                 block_size=8, 
                 sub_sampling='4:2:0', 
                 quality_factor=99, 
                 threshold=0.0,
                 vec_dim=300):
        super(DCTNet, self).__init__()
        assert layers in [18, 34, 50, 101]
        assert classes > 1
        self.classes = classes
        self.block_size = block_size
        self.sub_sampling = sub_sampling
        self.quality_factor = quality_factor
        self.thresh = threshold
        # the quantisation matrix for the luminance channel (QY)
        self.QY=np.array([[16,11,10,16,24,40,51,61],
                                [12,12,14,19,26,48,60,55],
                                [14,13,16,24,40,57,69,56],
                                [14,17,22,29,51,87,80,62],
                                [18,22,37,56,68,109,103,77],
                                [24,35,55,64,81,104,113,92],
                                [49,64,78,87,103,121,120,101],
                                [72,92,95,98,112,100,103,99]])
        # the quantisation matrix for the chrominance channels (QC)
        self.QC=np.array([[17,18,24,47,99,99,99,99],
                                [18,21,26,66,99,99,99,99],
                                [24,26,56,99,99,99,99,99],
                                [47,66,99,99,99,99,99,99],
                                [99,99,99,99,99,99,99,99],
                                [99,99,99,99,99,99,99,99],
                                [99,99,99,99,99,99,99,99],
                                [99,99,99,99,99,99,99,99]])
        # Backbone
        # if layers in [18,34]:
        resnet = resnet18(pretrained=False, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = \
             resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
        
        if layers == 18 or layers == 34:
            fea_dim = 512
        else:
            fea_dim = 2048
        down_dim = fea_dim // 4

        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, down_dim, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(down_dim),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(down_dim, classes, kernel_size=1)
        )
Example #14
    def __init__(self, layers=50, dropout=0.1, classes=2, use_dct=True, use_bise=True, vec_dim=300):
        super(DCTNet, self).__init__()
        assert layers in [18, 34, 50, 101]
        assert classes > 1
        self.use_dct = use_dct
        self.use_bise = use_bise
        self.vec_dim = vec_dim

        if layers == 18:
            resnet = models.resnet18(pretrained=False, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        elif layers == 34:
            resnet = models.resnet34(pretrained=True, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        elif layers == 50:
            resnet = models.resnet50_semseg(pretrained=True, deep_base=True, strides=(1, 2, 1, 1), dilations=(1, 1, 2, 4))
        elif layers == 101:
            resnet = models.resnet101_semseg(pretrained=True, deep_base=True, strides=(1, 2, 1, 1), dilations=(1, 1, 2, 4))

        if layers == 18 or layers == 34:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        else:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2,
                                        resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
        
        if layers == 18 or layers == 34:
            fea_dim = 512
            aux_dim = 256
        else:
            fea_dim = 2048
            aux_dim = 1024
        down_dim = fea_dim // 4
        if use_dct:
            self.dct_encoding = DCTModule(vec_dim=self.vec_dim)
        if use_bise:
            self.ffm = FeatureFusionModule(
                in_channels=self.vec_dim + 128, out_channels=fea_dim)  # concat: 128+128
            
        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, down_dim, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(down_dim),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(down_dim, classes, kernel_size=1)
        )
        if self.training:
            self.aux = nn.Sequential(
                nn.Conv2d(aux_dim, aux_dim // 4, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(aux_dim // 4),
                nn.ReLU(inplace=True),
                nn.Dropout2d(p=dropout),
                nn.Conv2d(aux_dim // 4, classes, kernel_size=1)
            )
Example #15
    def __init__(self,
                 layers=50,
                 dropout=0.1,
                 classes=2,
                 use_dct=True,
                 vec_dim=300):
        super(DCTNet, self).__init__()
        assert layers in [18, 34, 50, 101]
        assert classes > 1
        self.use_dct = use_dct
        self.vec_dim = vec_dim

        if layers == 18:
            resnet = models.resnet18(pretrained=False,
                                     deep_base=False,
                                     strides=(1, 2, 2, 2),
                                     dilations=(1, 1, 1, 1))
        elif layers == 34:
            resnet = models.resnet34(pretrained=True,
                                     deep_base=False,
                                     strides=(1, 2, 2, 2),
                                     dilations=(1, 1, 1, 1))

        if layers == 18 or layers == 34:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                        resnet.maxpool)
        else:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                        resnet.conv2, resnet.bn2, resnet.relu,
                                        resnet.conv3, resnet.bn3, resnet.relu,
                                        resnet.maxpool)
        self.layer1, self.layer2, self.layer3 = resnet.layer1, resnet.layer2, resnet.layer3

        if layers == 18 or layers == 34:
            fea_dim = 256
        down_dim = fea_dim // 4
        if use_dct:
            self.dct_encoding = DCTModule(vec_dim=self.vec_dim)
            self.up_conv = ConvBNReLU(fea_dim,
                                      fea_dim,
                                      ks=1,
                                      stride=1,
                                      padding=0)

        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, down_dim, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(down_dim), nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout), nn.Conv2d(down_dim,
                                               classes,
                                               kernel_size=1))
Example #16
    def __init__(self, opt):
        super(EncoderDecoder, self).__init__()
        if opt.SVR:
            self.encoder = resnet.resnet18(pretrained=False,
                                           num_classes=opt.bottleneck_size)
        else:
            self.encoder = PointNet(nlatent=opt.bottleneck_size)

        self.decoder = Atlasnet(opt)
        self.to(opt.device)

        if not opt.SVR:
            self.apply(weights_init)  # initialization of the weights
        self.eval()
Example #17
def test(path):
    model = resnet.resnet18(num_classes=3919)
    model.load_state_dict(torch.load(args.checkpoint))
    model.cuda()
    model.eval()

    # probe
    list_probe = os.listdir(os.path.join(path, 'probe'))
    p_array = np.zeros((len(list_probe), 512))  # features of the m probe images to be queried
    p_name = []
    for i, img_p in enumerate(sorted(list_probe)):
        p_name.append(img_p)
        img = cv2.imread(os.path.join(path, 'probe', img_p))
        img_tensor = transforms.ToTensor()(img).unsqueeze(0)
        feature, out = model(Variable(img_tensor.cuda()))
        p_array[i] = feature.cpu().detach().numpy()

    # gallery && confidence[m][n]
    list_gallery = os.listdir(os.path.join(path, 'gallery'))
    confidence_array = np.zeros((len(p_array), len(list_gallery)),
                                dtype=np.float64)
    for n, dir_name in enumerate(sorted(list_gallery)):  # one folder per gallery identity
        dir_list = os.listdir(os.path.join(path, 'gallery', dir_name))
        g_array_dir = np.empty((len(dir_list), 512))  # features extracted from this folder
        for i, img_g in enumerate(sorted(dir_list)):
            img = cv2.imread(os.path.join(path, 'gallery', dir_name, img_g))
            img_tensor = transforms.ToTensor()(img).unsqueeze(0)
            feature, out = model(Variable(img_tensor.cuda()))
            g_array_dir[i] = feature.cpu().detach().numpy()
        # confidence_array = strategy_mean(confidence_array, p_array, g_array_dir, n)
        confidence_array = strategy_max(confidence_array, p_array, g_array_dir,
                                        n)

    # write the results to a csv file
    with open('output/test.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        for m in range(confidence_array.shape[0]):
            line = {}
            write_line = [p_name[m]]
            for n in range(confidence_array.shape[1]):
                line[('000' + str(n))[-4:]] = confidence_array[m][n]
            line_reverse = sorted(line.items(),
                                  key=lambda x: x[1],
                                  reverse=True)
            for t in range(len(line)):
                write_line.append(line_reverse[t][0])
                write_line.append(line_reverse[t][1])
            writer.writerow(write_line)
    return p_name
Example #18
def main(args):
    checkpoint = torch.load(args.model_path)

    backbone = resnet18(num_classes=6).cuda()

    backbone.load_state_dict(checkpoint['backbone'])

    my_val_dataset = HandDataloader(args.test_dataset,
                                    transforms=None,
                                    train=False)
    my_val_dataloader = DataLoader(my_val_dataset,
                                   batch_size=2,
                                   shuffle=True,
                                   num_workers=0)

    validate(my_val_dataloader, backbone)
Example #19
def main():

    pth_path = '/home/zhaoliu/car_class+ori/results/resnet_18_newval/save_27.pth'

    test_result = Path('/home/zhaoliu/car_class+ori/test_result')
    test_path = '/home/zhaoliu/car_data/训练数据/4.9新加测试集/val_lmdb'
    keys_path = '/home/zhaoliu/car_data/训练数据/4.9新加测试集/new_val.npy'
    # test_path = '/mnt/disk/zhaoliu_data/carlogo/lmdb/carlogo_train_new/car_256_all_lmdb'
    # keys_path = '/home/zhaoliu/car_data/val.npy'

    badcase_npy = '/home/zhaoliu/car_class+ori/test_result/badcase_val_aug.npy'
    model = resnet18(num_classes=21)
    model = resume_model(pth_path, model)
    test_loader, test_batch_logger, test_logger = get_test_utils(
        test_path, test_result, keys_path)
    print('Data loading finished...')
    test(model, test_loader, test_batch_logger, test_logger, badcase_npy)
Example #20
def main():

    pth_path = '/home/zhaoliu/car_brand/results/fuxian/save_15.pth'

    test_result = '/home/zhaoliu/car_full/badcase/'
    test_path = '/home/zhaoliu/car_data/训练数据/4.9新加测试集/val_lmdb'
    keys_path = '/home/zhaoliu/car_data/训练数据/4.9新加测试集/new_val.npy'

    fullnpy_path = test_result + 'full_badkeys.npy'
    fulltxt_path = test_result + 'full_badcase_info.txt'

    model = resnet18(num_classes=27249)
    model = resume_model(pth_path, model)
    test_loader = get_test_utils(test_path, keys_path)

    print('Data loading finished...')
    test(model, test_loader, fullnpy_path, fulltxt_path)
Example #21
def create_model_optimizer_scheduler(args, dataset_class, optimizer='adam', scheduler='steplr',
                                     load_optimizer_scheduler=False):
    if args.arch == 'wideresnet':
        model = WideResNet(depth=args.layers,
                           num_classes=dataset_class.num_classes,
                           widen_factor=args.widen_factor,
                           dropout_rate=args.drop_rate)
    elif args.arch == 'densenet':
        model = densenet121(num_classes=dataset_class.num_classes)
    elif args.arch == 'lenet':
        model = LeNet(num_channels=3, num_classes=dataset_class.num_classes,
                      droprate=args.drop_rate, input_size=dataset_class.input_size)
    elif args.arch == 'resnet':
        model = resnet18(num_classes=dataset_class.num_classes, input_size=dataset_class.input_size,
                         drop_rate=args.drop_rate)
    else:
        raise NotImplementedError

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    model = model.cuda()

    if optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    else:
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                                    nesterov=args.nesterov, weight_decay=args.weight_decay)

    if scheduler == 'steplr':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.2)
    else:
        args.iteration = args.fixmatch_k_img // args.batch_size
        args.total_steps = args.fixmatch_epochs * args.iteration
        scheduler = get_cosine_schedule_with_warmup(
            optimizer, args.fixmatch_warmup * args.iteration, args.total_steps)

    if args.resume:
        if load_optimizer_scheduler:
            model, optimizer, scheduler = resume_model(args, model, optimizer, scheduler)
        else:
            model, _, _ = resume_model(args, model)

    return model, optimizer, scheduler
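A hedged usage sketch for create_model_optimizer_scheduler; args and dataset_class are hypothetical stand-ins whose attributes mirror only what the function above actually reads:

from argparse import Namespace

# Hypothetical stand-ins: every field below corresponds to an attribute accessed above.
args = Namespace(arch='resnet', layers=28, widen_factor=2, drop_rate=0.0,
                 lr=1e-3, momentum=0.9, nesterov=True, weight_decay=5e-4,
                 resume=False, fixmatch_k_img=65536, fixmatch_epochs=300,
                 fixmatch_warmup=0, batch_size=64)
dataset_class = Namespace(num_classes=10, input_size=32)

model, optimizer, scheduler = create_model_optimizer_scheduler(
    args, dataset_class, optimizer='adam', scheduler='steplr')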
    def __init__(self, layers=50, block_size=8):
        super(ContextPath, self).__init__()
        assert layers in [18, 34, 50, 101]
        self.layers = layers
        self.block_size = block_size
        # Backbone
        # if layers in [18,34]:
        #     resnet = ResNetDCT_345(layers, in_channels=192, pretrained=False, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        #     # resnet = ResNetDCT_345(layers, pretrained=False, deep_base=False, strides=(1, 2, 1, 1), dilations=(1, 1, 2, 4))
        # self.layer2, self.layer3, self.layer4 = \
        #      resnet.layer2, resnet.layer3, resnet.layer4

        # resnet = ResNetDCT_2345(layers, in_channels=192, pretrained=False, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        # if layers in [18,34]:
        #     self.down_layer = resnet.down_layer
        resnet = resnet18(pretrained=False, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = \
             resnet.layer1,resnet.layer2, resnet.layer3, resnet.layer4
    def __init__(self,
                 backbone='resnet18',
                 pretrained_base=True,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(ContextPath, self).__init__()
        if backbone == 'resnet18':
            pretrained = resnet18(pretrained=pretrained_base, **kwargs)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.conv1 = pretrained.conv1
        self.bn1 = pretrained.bn1
        self.relu = pretrained.relu
        self.maxpool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.layer4 = pretrained.layer4

        inter_channels = 128
        self.global_context = _GlobalAvgPooling(512, inter_channels,
                                                norm_layer)

        self.arms = nn.ModuleList([
            AttentionRefinmentModule(512, inter_channels, norm_layer,
                                     **kwargs),
            AttentionRefinmentModule(256, inter_channels, norm_layer, **kwargs)
        ])
        self.refines = nn.ModuleList([
            _ConvBNReLU(inter_channels,
                        inter_channels,
                        3,
                        1,
                        1,
                        norm_layer=norm_layer),
            _ConvBNReLU(inter_channels,
                        inter_channels,
                        3,
                        1,
                        1,
                        norm_layer=norm_layer)
        ])
def save_model(data_loader, dataset_sizes, n_classes, device, arg_data, version, num_epochs):
    # train the model
    model_ft = resnet.resnet18(pretrained=True)
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, n_classes)
    model_ft = model_ft.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

    model_ft, best_acc, fvs, label_epoch = train_model(data_loader, dataset_sizes,
                                                       model_ft, criterion, optimizer_ft, exp_lr_scheduler,
                                                       num_epochs, device)

    ################
    # save the trained weights
    wts_save_path = './weights/'
    torch.save(model_ft.state_dict(), wts_save_path+'weights_{}_ver{} (epochs={}).pth'.format(arg_data, version, num_epochs))
    return fvs, label_epoch
    def __init__(self, layers=18, classes=2):
        super(TriSeNet, self).__init__()

        if layers == 18:
            backbone = models.resnet18(pretrained=True,
                                       deep_base=False,
                                       strides=(1, 2, 2, 2),
                                       dilations=(1, 1, 1, 1))
        elif layers == 34:
            backbone = models.resnet34(pretrained=True,
                                       deep_base=False,
                                       strides=(1, 2, 2, 2),
                                       dilations=(1, 1, 1, 1))

        # the initial layer conv is 7x7, instead of three 3x3
        self.layer0 = nn.Sequential(backbone.conv1, backbone.bn1,
                                    backbone.relu, backbone.maxpool)
        # stage channels for resnet18 and resnet34 are:(64, 128, 256, 512)
        self.layer1, self.layer2, self.layer3, self.layer4 \
            = backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4

        # self.gap = nn.AdaptiveAvgPool2d(1)  # Global Average Pooling

        # self.up_16_8 = UpModule(in_channels=256, out_channels=128, up_scale=2)  # feat_16 up to the size of feat_8
        # self.up_32_8 = UpModule(in_channels=512, out_channels=128, up_scale=4)
        # self.down_8_16 = DownModule(in_channels=128, out_channels=256, down_scale=2) # feat_8 down to the size of feat_16
        # self.up_32_16 = UpModule(in_channels=512, out_channels=256, up_scale=2)
        self.down_8_32 = DownModule(in_channels=128,
                                    out_channels=512,
                                    down_scale=4)
        # self.down_16_32 = DownModule(in_channels=256, out_channels=512, down_scale=2)
        self.relu = nn.ReLU(inplace=True)

        self.sa_8_32 = SelfAttentionBlock(512)
        # self.ca_32_8 = ChannelAttentionModule(in_channels=128, reduction=4)

        # self.cp = ContextPath(in_channels=3, out_channels=128, backbone=resnet)

        self.seg_head = SegHead(in_channels=640,
                                mid_channels=256,
                                classes=classes)
Example #26
    def __init__(self, layers=50,  dropout=0.1, classes=2, fuse=8):
        super(TriSeNet1, self).__init__()
        assert layers in [18, 34, 50, 101]
        assert classes > 1
        self.fuse = fuse
        # Backbone
        if layers == 18:
            resnet = models.resnet18(pretrained=False, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        elif layers == 34:
            resnet = models.resnet34(pretrained=True, deep_base=False, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1))
        elif layers == 50:
            resnet = models.resnet50_semseg(pretrained=True, deep_base=True, strides=(1, 2, 1, 1), dilations=(1, 1, 2, 4))
        elif layers == 101:
            resnet = models.resnet101_semseg(pretrained=True, deep_base=True, strides=(1, 2, 1, 1), dilations=(1, 1, 2, 4))

        if layers == 18 or layers == 34:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)
        else:
            self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.conv2,
                                        resnet.bn2, resnet.relu, resnet.conv3, resnet.bn3, resnet.relu, resnet.maxpool)
        self.layer1, self.layer2, self.layer3, self.layer4 = resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4
        
        if layers == 18 or layers == 34:
            fea_dim = 512
            aux_dim = 256
        else:
            fea_dim = 2048
            aux_dim = 1024
        down_dim = fea_dim // 4

        self.cls = nn.Sequential(
            nn.Conv2d(fea_dim, down_dim, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(down_dim),
            nn.ReLU(inplace=True),
            nn.Dropout2d(p=dropout),
            nn.Conv2d(down_dim, classes, kernel_size=1)
        )
        if self.fuse == 16 or self.fuse == 8:
            self.fuse_16 = nn.Conv2d(fea_dim//2, classes, kernel_size=1)
        if self.fuse == 8:
            self.fuse_8 = nn.Conv2d(fea_dim//4, classes, kernel_size=1)
Example #27
    def __init__(self, embedding_size, num_classes, backbone='resnet50'):
        super(background_resnet, self).__init__()
        self.backbone = backbone
        # copying modules from pretrained models
        if backbone == 'resnet50':
            self.pretrained = resnet.resnet50(pretrained=False)
        elif backbone == 'resnet101':
            self.pretrained = resnet.resnet101(pretrained=False)
        elif backbone == 'resnet152':
            self.pretrained = resnet.resnet152(pretrained=False)
        elif backbone == 'resnet18':
            self.pretrained = resnet.resnet18(pretrained=False)
        elif backbone == 'resnet34':
            self.pretrained = resnet.resnet34(pretrained=False)
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))

        self.fc0 = nn.Linear(512, embedding_size)
        self.bn0 = nn.BatchNorm1d(embedding_size)
        self.relu = nn.ReLU()
        self.last = nn.Linear(embedding_size, num_classes)
    def __init__(self,
                 layers,
                 in_channels=192,
                 strides=(1, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 pretrained=False,
                 deep_base=False) -> None:
        super(ResNetDCT_2345, self).__init__()
        self.layers = layers
        if layers == 18:
            resnet = resnet18(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 34:
            resnet = resnet34(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 50:
            resnet = resnet50(pretrained, deep_base, strides=strides, dilations=dilations)
        elif layers == 101:
            resnet = resnet101(pretrained, deep_base, strides=strides, dilations=dilations)
        self.layer1, self.layer2, self.layer3, self.layer4, self.avgpool, self.fc = \
            resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4, resnet.avgpool, resnet.fc
        self.relu = nn.ReLU(inplace=True)
        if layers in [18, 34]:
            in_ch = self.layer1[0].conv1.in_channels
            self.down_layer = nn.Sequential(
                nn.Conv2d(in_channels, in_ch, kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(in_ch),
                nn.ReLU(inplace=True)
            )
            # initialize the weight for only one layer
            for m in self.down_layer.modules():
                init_weight(m)
        else:
            out_ch = self.layer1[0].conv1.out_channels
            self.layer1[0].conv1 = nn.Conv2d(in_channels, out_ch, kernel_size=1, stride=1, bias=False)
            init_weight(self.layer1[0].conv1)

            out_ch = self.layer1[0].downsample[0].out_channels
            self.layer1[0].downsample[0] = nn.Conv2d(in_channels, out_ch, kernel_size=1, stride=1, bias=False)
            init_weight(self.layer1[0].downsample[0])
Example #29
elif args.activation == 'sigmoid':
    activate = nn.Sigmoid
elif args.activation == 'tanh':
    activate = nn.Tanh
else:
    raise ValueError(
        'activation should be relu/elu/leakyrelu/rrelu/sigmoid/tanh')

# whether to use an extra hidden layer + dropout
hidden = int(args.hidden)

# select the model
if args.layer == '18':
    net = resnet18(pretrained=False,
                   progress=True,
                   activate=activate,
                   hidden=hidden,
                   num_classes=10)
elif args.layer == '34':
    net = resnet34(pretrained=False,
                   progress=True,
                   activate=activate,
                   hidden=hidden,
                   num_classes=10)
elif args.layer == '50':
    net = resnet50(pretrained=False,
                   progress=True,
                   activate=activate,
                   hidden=hidden,
                   num_classes=10)
elif args.layer == '101':
Example #30
        tf_record = './runs/'
        if not os.path.exists(tf_record):
            os.mkdir(tf_record)

        tf_record = os.path.join(tf_record, args.arch + '_' + args.method)
        writer = SummaryWriter(tf_record)
    else:
        writer = None

    # batch size
    train_batchSize = [args.label_batch_size, args.unlabel_batch_size]

    # backbone architecture
    if args.arch == 'resnet18':
        backbone = resnet.resnet18(feature_len=args.feat_len)
    elif args.arch == 'resnet34':
        backbone = resnet.resnet34(feature_len=args.feat_len)
    elif args.arch == 'resnet50':
        backbone = resnet.resnet50(feature_len=args.feat_len)
    elif args.arch == 'resnet101':
        backbone = resnet.resnet101(feature_len=args.feat_len)
    elif args.arch == 'resnet152':
        backbone = resnet.resnet152(feature_len=args.feat_len)
    elif args.arch == 'usr':
        backbone = model_usr
    else:
        raise NameError(
            'Arch %s is not supported. Please choose from [resnet18, resnet34, resnet50, resnet101, resnet152, usr]'
            % args.arch)