Example #1
0
def InceptionResNetV2():
    """Build a pretrained Inception-ResNet-v2 classifier.

    Replaces the final ``conv2d_7b`` block with a 128-channel BasicConv2d
    and the last linear layer with a ``cfg.num_classes``-way classifier.

    Returns:
        The modified model.
    """
    model = inceptionresnetv2(pretrained="imagenet")
    # Shrink the 2080-channel feature head down to 128 channels
    # before the (much smaller) classification layer.
    model.conv2d_7b = BasicConv2d(2080, 128, 1, 1)
    model.last_linear = nn.Linear(128, cfg.num_classes)

    return model
Example #2
0
def get_inception_resnet_v2():
    """Pretrained Inception-ResNet-v2 adapted via ``config``: custom input
    conv, adaptive pooling, a ``config.num_classes`` head, and (optionally)
    a bilinear-upsampling reconstruction decoder for an auxiliary MSE loss."""

    class BasicConv2d(nn.Module):
        """Conv -> BatchNorm -> ReLU block."""

        def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):
            super(BasicConv2d, self).__init__()
            # bias=False since BatchNorm provides the affine shift
            # (verify bias false).
            self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                                  stride=stride, padding=padding, bias=False)
            # eps=0.001 is the value found in tensorflow; momentum=0.1 is
            # the default pytorch value.
            self.bn = nn.BatchNorm2d(out_planes, eps=0.001, momentum=0.1,
                                     affine=True)
            self.relu = nn.ReLU(inplace=False)

        def forward(self, x):
            return self.relu(self.bn(self.conv(x)))

    model = inceptionresnetv2(pretrained="imagenet")
    # Swap the stem conv so the network accepts config.in_channels inputs.
    model.conv2d_1a = BasicConv2d(config.in_channels, 32, kernel_size=3, stride=2)
    model.avgpool_1a = nn.AdaptiveAvgPool2d(1)
    model.last_linear = nn.Sequential(
        nn.BatchNorm1d(1536),
        nn.Dropout(0.5),
        nn.Linear(1536, config.num_classes),
    )

    if config.with_mse_loss:
        # Decoder: an initial BatchNorm, then four upsample->conv->BN->ReLU
        # stages at 1/16, 1/8, 1/4 and 1/2 of the input resolution, then a
        # final upsample to full size with a sigmoid output.
        stages = [nn.BatchNorm2d(1536)]
        in_ch = 1536
        for divisor in (16, 8, 4, 2):
            size = [int(config.img_height / divisor), int(config.img_width / divisor)]
            stages += [
                nn.UpsamplingBilinear2d(size),
                nn.Conv2d(in_ch, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
                nn.BatchNorm2d(32, affine=True),
                nn.ReLU(),
            ]
            in_ch = 32
        stages += [
            nn.UpsamplingBilinear2d([config.img_height, config.img_width]),
            nn.Conv2d(32, config.out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.Sigmoid(),
        ]
        model.reconstruct_layer = nn.Sequential(*stages)
    return model
    def __init__(self, feature_extraction_type='default'):
        # NOTE(review): this method sits after the preceding function's
        # `return` in this chunk and its enclosing class header is not
        # visible here — it appears to be a paste artifact from a
        # feature-extractor wrapper class; confirm against the original file.
        """Wrap an inceptionresnetv2 backbone for feature extraction.

        Args:
            feature_extraction_type: which extraction point to use; must be
                'default', 'concat', or one of the layer names listed below.

        Raises:
            NotImplementedError: if `feature_extraction_type` is not a
                recognised extraction point.
        """
        super().__init__()

        # Valid extraction points (layer names in network order).
        if feature_extraction_type not in [
                'default', 'concat', 'conv2d_1a', 'conv2d_2a', 'conv2d_2b',
                'maxpool_3a', "conv2d_3b", "conv2d_4a", "maxpool_5a",
                "mixed_5b", "mixed_6a", "mixed_7a", "conv2d_7b"
        ]:
            raise NotImplementedError(
                "Unknown 'feature_extraction_type': {}".format(
                    feature_extraction_type))

        self.feature_extraction_type = feature_extraction_type
        # Backbone constructed without pretrained weights here.
        self.model = inceptionresnetv2()
def _load_state(model, ckpt_path):
    """Load weights from ``ckpt_path`` into ``model``.

    Accepts either a bare state_dict or a ``{'state_dict': ...}`` wrapper.
    """
    checkpoint = torch.load(ckpt_path)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        checkpoint = checkpoint['state_dict']
    model.load_state_dict(checkpoint)


def main():
    """Watch ``args.input_dir`` and defend incoming images as they appear.

    Builds the two denoiser nets (model1/model4) and a random-padding
    inceptionresnetv2, hands them to a watchdog ``FileEventHandler`` that
    writes results into ``args.output_dir``, and blocks until Ctrl-C.
    """
    args = parser.parse_args()

    # Validate the watched folders up front.
    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not os.path.exists(args.output_dir):
        print("Error: Invalid output folder %s" % args.output_dir)
        exit(-1)

    # Only the res (model1) and rex (model4) denoisers are used in this
    # variant; the inres/incepv3 nets were disabled.
    with torch.no_grad():
        config, resmodel = get_model1()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net4 = rexmodel.net

    _load_state(resmodel, 'denoise_res_015.ckpt')
    _load_state(rexmodel, 'denoise_rex_001.ckpt')

    if not args.no_gpu:
        resmodel = resmodel.cuda()
        rexmodel = rexmodel.cuda()
    resmodel.eval()
    rexmodel.eval()

    # inceptionresnetv2 used for the random-padding defense branch.
    model = inceptionresnetv2(num_classes=1001, pretrained='imagenet+background')
    model = model.cuda()
    model.eval()

    ''' watch the input dir for defense '''
    observer = Observer()
    event_handler = FileEventHandler(batch_size=args.batch_size,
                                     input_dir=args.input_dir,
                                     net1=net1,
                                     net4=net4,
                                     model=model,
                                     itr=args.itr,
                                     output_dir=args.output_dir,
                                     no_gpu=args.no_gpu)

    observer.schedule(event_handler, args.input_dir, recursive=True)
    observer.start()

    print("watchdog start...")

    # Idle until interrupted; the observer thread does the work.
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()

    print("\nwatchdog stopped!")
Example #5
0
def main():
    """Classify every image in ``args.input_dir`` with two defenses.

    Defense 1 votes over random flip/resize/pad variants fed to an
    inceptionresnetv2; defense 2 runs the denoising nets. Labels on which
    the two defenses disagree are zeroed, each remaining label is refined
    with a k-means auxiliary model, and "<filename>,<label>" rows are
    written to ``args.output_file``.
    """
    start_time = time.time()

    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)

    tf = transforms.Compose([
        transforms.Resize([args.img_size, args.img_size]),
        transforms.ToTensor()
    ])

    # Random horizontal flip applied once per defense iteration.
    tf_flip = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor()
    ])

    # Shrinks a padded canvas back down to the model's input size.
    tf_shrink = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize([args.img_size, args.img_size]),
        transforms.ToTensor()
    ])

    with torch.no_grad():
        # torchvision-style normalisation for the torch-ported nets,
        # [-1, 1] scaling for the tensorflow-ported nets.
        mean_torch = autograd.Variable(torch.from_numpy(np.array([0.485, 0.456, 0.406]).reshape([1, 3, 1, 1]).astype('float32')).cuda())
        std_torch = autograd.Variable(torch.from_numpy(np.array([0.229, 0.224, 0.225]).reshape([1, 3, 1, 1]).astype('float32')).cuda())
        mean_tf = autograd.Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1, 1]).astype('float32')).cuda())
        std_tf = autograd.Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1, 1]).astype('float32')).cuda())

        dataset = Dataset(args.input_dir, transform=tf)
        loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False)

        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    # Each checkpoint may be a bare state_dict or a {'state_dict': ...} wrapper.
    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    # inceptionresnetv2 for the random-padding defense.
    model = inceptionresnetv2(num_classes=1001, pretrained='imagenet+background')
    model = model.cuda()
    model.eval()

    labels_denoise = {}
    labels_random = {}
    denoise_outputs = []
    random_outputs = []
    for batch_idx, (input, _) in enumerate(loader):
        # --- Defense 1: random flip/resize/pad, majority vote ---
        # Bilateral filtering first.
        # NOTE(review): the fixed (3, 299, 299) reshape assumes
        # batch_size == 1 and img_size == 299 — confirm args defaults.
        temp_numpy = input.data.numpy()
        temp_numpy = np.reshape(temp_numpy, (3, 299, 299))
        temp_numpy = np.moveaxis(temp_numpy, -1, 0)  # two moves: CHW -> HWC for cv2
        temp_numpy = np.moveaxis(temp_numpy, -1, 0)
        temp_numpy = cv2.bilateralFilter(temp_numpy, 6, 50, 50)
        temp_numpy = np.moveaxis(temp_numpy, -1, 0)  # back to CHW
        temp_numpy = np.reshape(temp_numpy, (1, 3, 299, 299))
        input00 = torch.from_numpy(temp_numpy)
        length_input, _, _, _ = input.size()
        iter_labels = np.zeros([length_input, 1001, args.itr])
        for j in range(args.itr):
            # random flipping
            input0 = batch_transform(input00, tf_flip, 299)
            # random resizing into [310, 331)
            resize_shape_ = random.randint(310, 331)
            image_resize = 331
            tf_rand_resize = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize([resize_shape_, resize_shape_]),
                transforms.ToTensor()
            ])
            input1 = batch_transform(input0, tf_rand_resize, resize_shape_)

            # random padding offsets within the 331x331 canvas
            shape = [random.randint(0, image_resize - resize_shape_), random.randint(0, image_resize - resize_shape_), image_resize]
            new_input = padding_layer_iyswim(input1, shape, tf_shrink)
            if not args.no_gpu:
                new_input = new_input.cuda()
            with torch.no_grad():
                input_var = autograd.Variable(new_input)
                logits = model(input_var)
                labels = logits.max(1)[1]
                labels_index = labels.data.tolist()
                print(len(labels_index))
                # one-hot vote for this iteration
                iter_labels[range(len(iter_labels)), labels_index, j] = 1
        # Majority vote across the args.itr randomized passes.
        final_labels = np.sum(iter_labels, axis=-1)
        labels = np.argmax(final_labels, 1)
        print(labels)
        random_outputs.append(labels)

        # --- Defense 2: denoising networks (only net1/net4 are active) ---
        if not args.no_gpu:
            input = input.cuda()
        with torch.no_grad():
            input_var = autograd.Variable(input)
            input_tf = (input_var - mean_tf) / std_tf
            input_torch = (input_var - mean_torch) / std_torch

            labels1 = net1(input_torch, True)[-1]
            labels4 = net4(input_torch, True)[-1]

            # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
            labels = (labels1 + labels4).max(1)[1] + 1
        denoise_outputs.append(labels.data.cpu().numpy())

    denoise_outputs = np.concatenate(denoise_outputs, axis=0)
    random_outputs = np.concatenate(random_outputs, axis=0)

    filenames = dataset.filenames()
    filenames = [os.path.basename(ii) for ii in filenames]
    labels_denoise.update(dict(zip(filenames, denoise_outputs)))
    labels_random.update(dict(zip(filenames, random_outputs)))

    # diff filtering: zero any label the two defenses disagree on.
    print('diff filtering...')
    if len(labels_denoise) != len(labels_random):
        print("Error: Number of labels returned by two defenses doesn't match")
        exit(-1)
    # Copy so zeroing disagreements does not mutate labels_denoise.
    final_labels = dict(labels_denoise)
    diff_index = [ii for ii in labels_denoise if labels_random[ii] != labels_denoise[ii]]
    for index in diff_index:
        final_labels[index] = 0

    elapsed_time = time.time() - start_time
    print('elapsed time: {0:.0f} [s]'.format(elapsed_time))

    # Build the k-means auxiliary model ONCE; it was previously reconstructed
    # and its weights reloaded for every single file inside the output loop.
    kmean = auxkmean(64, 10)
    kmean.importmodel()

    with open(args.output_file, 'w') as out_file:
        for filename, label in final_labels.items():
            kmean_img = os.path.join(args.input_dir, filename)
            kmean_label = kmean.compare(kmean_img, label)
            out_file.write('{0},{1}\n'.format(filename, kmean_label))
Example #6
0
 def __init__(self, config):
     # NOTE(review): the enclosing class is not visible in this chunk and the
     # 1-space indentation looks like a paste artifact — confirm against the
     # original file.
     """Pretrained Inception-ResNet-v2 backbone with 2-D dropout and a custom
     final linear layer built by the base class's ``get_last_linear``."""
     super().__init__(config)
     # Presumably the number of layers/blocks frozen during fine-tuning —
     # TODO confirm how the base class interprets this.
     self.freeze_num = 8
     self.model = inceptionresnetv2(num_classes=1000, pretrained="imagenet")
     self.dropout = nn.Dropout2d(p=0.2)
     # 1536 is the width of the backbone's final feature vector.
     self.get_last_linear(in_features=1536)
Example #7
0
def _seed_fourth_channel(new_conv, pretrained_weight):
    """Seed a 4-input-channel conv from a pretrained 3-channel weight tensor.

    Channels 0-2 copy the pretrained RGB filters; the 4th channel is
    initialised from the green (index 1) channel's filters.
    """
    new_conv.weight.data[:, :3, :, :] = pretrained_weight.data.clone()
    new_conv.weight.data[:, 3, :, :] = pretrained_weight.data[:, 1, :, :].clone()


def _head(in_features):
    """28-way classification head shared by every backbone variant."""
    return nn.Sequential(
        nn.BatchNorm1d(in_features),
        nn.Dropout(0.5),
        nn.Linear(in_features, 28),
    )


def Atlas_Inception(model_name,
                    pretrained=False,
                    drop_rate=0.,
                    num_channels=4):
    """Build an Inception backbone for the 28-class Atlas task.

    Args:
        model_name: 'bninception'/'inceptionv2', 'inceptionresnetv2', or
            'inceptionv4'.
        pretrained: load ImageNet weights; when num_channels == 4, also seed
            the extra input channel from the pretrained stem conv.
        drop_rate: unused; kept for backward-compatible interface.
        num_channels: 3 or 4 input channels.

    Returns:
        The configured model.

    Raises:
        ValueError: if model_name is unknown or num_channels is not 3 or 4.
        (Previously an unknown model_name fell through to an
        UnboundLocalError at `return model`.)
    """
    if num_channels not in (3, 4):
        raise ValueError('num_channels should be 3 or 4.')

    if model_name in ['bninception', 'inceptionv2']:
        print("Using BN Inception")
        if pretrained:
            print('Loading weights...')
            model = bninception(pretrained="imagenet")
        else:
            model = bninception(pretrained=None)
        model.global_pool = nn.AdaptiveAvgPool2d(1)

        if num_channels == 4:
            # Note: this stem conv keeps its bias (matching the original).
            nconv = nn.Conv2d(4,
                              64,
                              kernel_size=(7, 7),
                              stride=(2, 2),
                              padding=(3, 3))
            if pretrained:
                _seed_fourth_channel(nconv, model.conv1_7x7_s2.weight)
            model.conv1_7x7_s2 = nconv

        model.last_linear = _head(1024)

    elif model_name == 'inceptionresnetv2':
        print("Using Inception Resnet v2")
        if pretrained:
            print('Loading weights...')
            model = inceptionresnetv2(pretrained="imagenet")
        else:
            model = inceptionresnetv2(pretrained=None)
        model.avgpool_1a = nn.AdaptiveAvgPool2d(1)

        if num_channels == 4:
            nconv = nn.Conv2d(4,
                              32,
                              kernel_size=(3, 3),
                              stride=(2, 2),
                              bias=False)
            if pretrained:
                _seed_fourth_channel(nconv, model.conv2d_1a.conv.weight)
            model.conv2d_1a.conv = nconv

        model.last_linear = _head(1536)

    elif model_name == 'inceptionv4':
        print("Using Inception v4")
        if pretrained:
            print('Loading weights...')
            model = inceptionv4(pretrained="imagenet")
        else:
            model = inceptionv4(pretrained=None)
        model.avg_pool = nn.AdaptiveAvgPool2d(1)

        if num_channels == 4:
            nconv = nn.Conv2d(4,
                              32,
                              kernel_size=(3, 3),
                              stride=(2, 2),
                              bias=False)
            if pretrained:
                _seed_fourth_channel(nconv, model.features[0].conv.weight)
            model.features[0].conv = nconv

        model.last_linear = _head(1536)

    else:
        raise ValueError("Unknown model_name: {!r}".format(model_name))

    return model