Example #1
def main():
    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not args.output_file:
        print("Error: Please specify an output file")
        exit(-1)

    tf = transforms.Compose(
        [transforms.Scale([299, 299]),
         transforms.ToTensor()])

    mean_torch = autograd.Variable(torch.from_numpy(
        np.array([0.485, 0.456, 0.406]).reshape([1, 3, 1,
                                                 1]).astype('float32')).cuda(),
                                   volatile=True)
    std_torch = autograd.Variable(torch.from_numpy(
        np.array([0.229, 0.224, 0.225]).reshape([1, 3, 1,
                                                 1]).astype('float32')).cuda(),
                                  volatile=True)
    mean_tf = autograd.Variable(torch.from_numpy(
        np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1,
                                           1]).astype('float32')).cuda(),
                                volatile=True)
    std_tf = autograd.Variable(torch.from_numpy(
        np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1,
                                           1]).astype('float32')).cuda(),
                               volatile=True)

    dataset = Dataset(args.input_dir, transform=tf)
    loader = data.DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False)

    config, resmodel = get_model1()
    config, inresmodel = get_model2()
    config, incepv3model = get_model3()
    config, rexmodel = get_model4()
    net1 = resmodel.net
    net2 = inresmodel.net
    net3 = incepv3model.net
    net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    outputs = []
    for batch_idx, (input, _) in enumerate(loader):
        if not args.no_gpu:
            input = input.cuda()
        input_var = autograd.Variable(input, volatile=True)
        input_tf = (input_var - mean_tf) / std_tf
        input_torch = (input_var - mean_torch) / std_torch

        #clean1 = net1.denoise[0](input_torch)
        #clean2 = net2.denoise[0](input_tf)
        #clean3 = net3.denoise(input_tf)

        #labels1 = net1(clean1,False)[-1]
        #labels2 = net2(clean2,False)[-1]
        #labels3 = net3(clean3,False)[-1]

        labels1 = net1(input_torch, True)[-1]
        labels2 = net2(input_tf, True)[-1]
        labels3 = net3(input_tf, True)[-1]
        labels4 = net4(input_torch, True)[-1]

        # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
        labels = (labels1 + labels2 + labels3 + labels4).max(1)[1] + 1
        outputs.append(labels.data.cpu().numpy())
    outputs = np.concatenate(outputs, axis=0)

    with open(args.output_file, 'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, outputs):
            filename = os.path.basename(filename)
            out_file.write('{0},{1}\n'.format(filename, label))
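Note: Example #1 (like several snippets below) is written against the pre-0.4 PyTorch API: transforms.Scale has since been renamed to transforms.Resize, and autograd.Variable(..., volatile=True) has been replaced by the torch.no_grad() context. A minimal sketch of the same pre-processing on a current PyTorch, with a dummy batch standing in for the project's Dataset/DataLoader:

# Sketch only: transforms.Resize replaces the removed transforms.Scale, plain
# tensors replace autograd.Variable, and torch.no_grad() replaces volatile=True.
import torch
from torchvision import transforms

tf = transforms.Compose([transforms.Resize([299, 299]),
                         transforms.ToTensor()])

device = 'cuda' if torch.cuda.is_available() else 'cpu'
mean_torch = torch.tensor([0.485, 0.456, 0.406], device=device).view(1, 3, 1, 1)
std_torch = torch.tensor([0.229, 0.224, 0.225], device=device).view(1, 3, 1, 1)
mean_tf = torch.full((1, 3, 1, 1), 0.5, device=device)
std_tf = torch.full((1, 3, 1, 1), 0.5, device=device)

with torch.no_grad():                                    # inference-only block
    batch = torch.rand(4, 3, 299, 299, device=device)    # stand-in for a loader batch
    input_torch = (batch - mean_torch) / std_torch       # torchvision-style stats
    input_tf = (batch - mean_tf) / std_tf                # TF/Inception-style stats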
Example #2
def main():
    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not os.path.exists(args.output_dir):
        print("Error: Invalid output folder %s" % args.output_dir)
        exit(-1)

    with torch.no_grad():
        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()
    ''' watch the input dir for defense '''
    observer = Observer()
    event_handler = FileEventHandler(batch_size=args.batch_size,
                                     input_dir=args.input_dir,
                                     net1=net1,
                                     net4=net4,
                                     output_dir=args.output_dir,
                                     no_gpu=args.no_gpu)

    observer.schedule(event_handler, args.input_dir, recursive=True)
    observer.start()

    print("watchdog start...")

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()

    print("\nwatchdog stopped!")
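The FileEventHandler used above is a project-specific class that is not part of this listing. As a rough, hypothetical sketch (not the project's actual implementation), such a handler is typically built on watchdog's FileSystemEventHandler and runs the defense whenever new files appear in the watched input directory:

# Hypothetical sketch of a watchdog-based handler; FileSystemEventHandler,
# on_created and event.src_path are real watchdog APIs, while run_defense()
# stands in for the project's own inference routine.
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler


class FileEventHandler(FileSystemEventHandler):
    def __init__(self, input_dir, output_dir):
        super().__init__()
        self.input_dir = input_dir
        self.output_dir = output_dir

    def on_created(self, event):
        # fires whenever a new file or directory appears under input_dir
        if not event.is_directory:
            print('new file:', event.src_path)
            # run_defense(event.src_path, self.output_dir)  # placeholder


if __name__ == '__main__':
    os.makedirs('watched_dir', exist_ok=True)
    observer = Observer()
    observer.schedule(FileEventHandler('watched_dir', 'out'), 'watched_dir',
                      recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()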
Example #3
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--imagenet-path', type=str, default='../../obfuscated_zoo/imagenet_val',
            help='path to the ImageNet validation images')
    parser.add_argument('--start', type=int, default=0)
    parser.add_argument('--end', type=int, default=100)


    parser.add_argument('--no-gpu', action='store_true', default=False,
                        help='disables GPU training')
    args = parser.parse_args()


    tf = transforms.Compose([
        transforms.Scale([299, 299]),
        transforms.ToTensor()
    ])

    mean_torch = autograd.Variable(
        torch.from_numpy(np.array([0.485, 0.456, 0.406]).reshape([1, 3, 1, 1]).astype('float32')).cuda(), volatile=True)
    std_torch = autograd.Variable(
        torch.from_numpy(np.array([0.229, 0.224, 0.225]).reshape([1, 3, 1, 1]).astype('float32')).cuda(), volatile=True)
    mean_tf = autograd.Variable(
        torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1, 1]).astype('float32')).cuda(), volatile=True)
    std_tf = autograd.Variable(
        torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1, 1]).astype('float32')).cuda(), volatile=True)

    test_loss = 0
    correct = 0
    total = 0
    totalImages = 0
    succImages = 0
    faillist = []

    # set up TensorFlow session


    # initialize a model

    config, resmodel = get_model1()
    config, inresmodel = get_model2()
    config, incepv3model = get_model3()
    config, rexmodel = get_model4()
    net1 = resmodel.net
    net2 = inresmodel.net
    net3 = incepv3model.net
    net4 = rexmodel.net

    net1 = torch.nn.DataParallel(net1,device_ids=range(torch.cuda.device_count())).cuda()
    net2 = torch.nn.DataParallel(net2,device_ids=range(torch.cuda.device_count())).cuda()
    net3 = torch.nn.DataParallel(net3,device_ids=range(torch.cuda.device_count())).cuda()
    net4 = torch.nn.DataParallel(net4,device_ids=range(torch.cuda.device_count())).cuda()



    checkpoint = torch.load('../all_models/guided-denoiser/denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('../all_models/guided-denoiser/denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('../all_models/guided-denoiser/denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('../all_models/guided-denoiser/denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = torch.nn.DataParallel(inresmodel,device_ids=range(torch.cuda.device_count())).cuda()
        resmodel = torch.nn.DataParallel(resmodel,device_ids=range(torch.cuda.device_count())).cuda()
        incepv3model = torch.nn.DataParallel(incepv3model,device_ids=range(torch.cuda.device_count())).cuda()
        rexmodel = torch.nn.DataParallel(rexmodel,device_ids=range(torch.cuda.device_count())).cuda()
#
#         inresmodel = inresmodel.cuda()
#         resmodel = resmodel.cuda()
#         incepv3model = incepv3model.cuda()
#         rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()


    # initialize a data provider for ImageNet images
    provider = ImageNet(args.imagenet_path, (299,299,3))

    target_list = [10,11,13,23,33,46,51,57,74,77,79,85,98,115,122,125]

    start = 150
    end = 950
    total = 0
    attacktime = 0
    imageno = []
    for i in range(start, end):
#         if i not in target_list:
#             continue
        success = False
        print('evaluating %d of [%d, %d)' % (i, start, end))
        inputs, targets= provider[i]
        modify = np.random.randn(1,3,32,32) * 0.001


        input_var = autograd.Variable(torch.from_numpy(inputs.transpose(2,0,1)).cuda(), volatile=True)
        input_tf = (input_var - mean_tf) / std_tf
        input_torch = (input_var - mean_torch) / std_torch

        logits1 = F.softmax(net1(input_torch,True)[-1],-1)
        logits2 = F.softmax(net2(input_tf,True)[-1],-1)
        logits3 = F.softmax(net3(input_tf,True)[-1],-1)
        logits4 = F.softmax(net4(input_torch,True)[-1],-1)

        logits = ((logits1+logits2+logits3+logits4).data.cpu().numpy())/4


        # print(logits)
        if np.argmax(logits) != targets:
            print('skip the wrong example ', i)
            print('max label {} , target label {}'.format(np.argmax(logits), targets))
            continue
        totalImages += 1
        episode_start =time.time()


        for runstep in range(200):

            step_start = time.time()
            Nsample = np.random.randn(npop, 3,32,32)

            modify_try = modify.repeat(npop,0) + sigma*Nsample
            temp = []
            resize_start =time.time()
            for x in modify_try:
                temp.append(cv2.resize(x.transpose(1,2,0), dsize=(299,299), interpolation=cv2.INTER_LINEAR).transpose(2,0,1))
            modify_try = np.array(temp)
#             print('resize time ', time.time()-resize_start,flush=True)
            #modify_try = cv2.resize(modify_try.transpose(0,2,3,1), dsize=(299, 299), interpolation=cv2.INTER_CUBIC).transpose(0,3,1,2)
            #print(modify_try.shape, flush=True)

            newimg = torch_arctanh((inputs-boxplus) / boxmul).transpose(2,0,1)

            inputimg = np.tanh(newimg+modify_try) * boxmul + boxplus
            if runstep % 10 == 0:
                temp = []
                for x in modify:
                    temp.append(cv2.resize(x.transpose(1,2,0), dsize=(299,299), interpolation=cv2.INTER_LINEAR).transpose(2,0,1))
                modify_test = np.array(temp)

                #modify_test = cv2.resize(modify.transpose(0,2,3,1), dsize=(299, 299), interpolation=cv2.INTER_CUBIC).transpose(0,3,1,2)
                realinputimg = np.tanh(newimg+modify_test) * boxmul + boxplus
                realdist = realinputimg - (np.tanh(newimg) * boxmul + boxplus)
                realclipdist = np.clip(realdist, -epsi, epsi)
                realclipinput = realclipdist + (np.tanh(newimg) * boxmul + boxplus)
                l2real =  np.sum((realclipinput - (np.tanh(newimg) * boxmul + boxplus))**2)**0.5
                #l2real =  np.abs(realclipinput - inputs.numpy())


#                 realclipinput = realclipinput.transpose(0, 2, 3, 1)
                realclipinput = np.squeeze(realclipinput)
                realclipinput = np.asarray(realclipinput,dtype = 'float32')


                # realclipinput_expand = []
                # for x in range(samples):
                #     realclipinput_expand.append(realclipinput)
                # realclipinput_expand = np.array(realclipinput_expand)

                input_var = autograd.Variable(torch.from_numpy(realclipinput).cuda(), volatile=True)
                input_tf = (input_var - mean_tf) / std_tf
                input_torch = (input_var - mean_torch) / std_torch

                logits1 = F.softmax(net1(input_torch, True)[-1],-1)
                logits2 = F.softmax(net2(input_tf, True)[-1],-1)
                logits3 = F.softmax(net3(input_tf, True)[-1],-1)
                logits4 = F.softmax(net4(input_torch, True)[-1],-1)

                logits = logits1 + logits2 + logits3 + logits4

                outputsreal = (logits.data.cpu().numpy()[0])/4


                print('probs ',np.sort(outputsreal)[-1:-6:-1])
                print('target label ', np.argsort(outputsreal)[-1:-6:-1])
                print('negative_probs ', np.sort(outputsreal)[0:3:1])
                sys.stdout.flush()
                # print(outputsreal)

                #print(np.abs(realclipdist).max())
                #print('l2real: '+str(l2real.max()))
                # print(outputsreal)
                if (np.argmax(outputsreal) != targets) and (np.abs(realclipdist).max() <= epsi):
                    attacktime += time.time()-episode_start
                    print('episode time : ', time.time()-episode_start)
                    print('attack time : ', attacktime)
                    succImages += 1
                    success = True
                    print('clipimage succImages: '+str(succImages)+'  totalImages: '+str(totalImages))
                    print('lirealsucc: '+str(realclipdist.max()))
                    sys.stdout.flush()
#                     imsave(folder+classes[targets[0]]+'_'+str("%06d" % batch_idx)+'.jpg',inputs.transpose(1,2,0))
                    break
            dist = inputimg - (np.tanh(newimg) * boxmul + boxplus)
            clipdist = np.clip(dist, -epsi, epsi)
            clipinput = (clipdist + (np.tanh(newimg) * boxmul + boxplus)).reshape(npop,3,299,299)
            target_onehot =  np.zeros((1,1000))


            target_onehot[0][targets]=1.


            input_start = time.time()
#             clipinput = clipinput.transpose(0, 2, 3, 1)
            clipinput = np.squeeze(clipinput)
            clipinput = np.asarray(clipinput,dtype = 'float32')
            # clipinput_expand = []
            # for x in range(samples):
            #     clipinput_expand.append(clipinput)
            # clipinput_expand = np.array(clipinput_expand)
            # clipinput_expand = clipinput_expand.reshape((samples * npop, 299, 299, 3))
#             clipinput = clipinput.reshape((npop, 299, 299, 3))

            input_var = autograd.Variable(torch.from_numpy(clipinput).cuda(), volatile=True)
            input_tf = (input_var - mean_tf) / std_tf
            input_torch = (input_var - mean_torch) / std_torch

            logits1 = F.softmax(net1(input_torch, True)[-1],-1)
            logits2 = F.softmax(net2(input_tf, True)[-1],-1)
            logits3 = F.softmax(net3(input_tf, True)[-1],-1)
            logits4 = F.softmax(net4(input_torch, True)[-1],-1)

            logits = logits1 + logits2 + logits3 + logits4

            outputs = (logits.data.cpu().numpy())/4
            
#             print('input_time : ', time.time()-input_start,flush=True)

            target_onehot = target_onehot.repeat(npop,0)

            outputs = np.log(outputs)
            real = (target_onehot * outputs).sum(1)
            # per-sample max over the non-target classes; numpy's .max(1) already
            # returns the values (no [0] indexing as with torch's .max(1))
            other = ((1. - target_onehot) * outputs - target_onehot * 10000.).max(1)

#             real = np.log((target_onehot * outputs).sum(1)+1e-30)
#             other = np.log(((1. - target_onehot) * outputs - target_onehot * 10000.).max(1)[0]+1e-30)

            loss1 = np.clip(real - other, 0.,1000)

            Reward = 0.5 * loss1
#             Reward = l2dist

            Reward = -Reward

            A = (Reward - np.mean(Reward)) / (np.std(Reward)+1e-7)


            modify = modify + (alpha/(npop*sigma)) * ((np.dot(Nsample.reshape(npop,-1).T, A)).reshape(3,32,32))
#             print('one step time : ', time.time()-step_start)
        if not success:
            faillist.append(i)
            print('failed: ',faillist)
#         print('episode time : ', time.time()-episode_start,flush=True)
    print(faillist)
    success_rate = succImages/float(totalImages)




    print('attack success rate: %.2f%% (over %d data points)' % (success_rate*100, args.end-args.start))
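The inner loop of Example #3 is a black-box evolution-strategy attack: it samples npop Gaussian perturbations around the current low-resolution modifier, scores each candidate with a clipped margin loss on the averaged ensemble softmax, and moves the modifier along the reward-weighted mean of the noise. A minimal numpy sketch of that single update step, with placeholder values for the hyperparameters (npop, sigma, alpha) and the reward, which the example leaves as globals:

# Sketch of the evolution-strategy update used above; reward_fn is a placeholder
# for the example's -0.5 * clip(real - other, 0, 1000) margin loss computed from
# the ensemble's softmax outputs on the perturbed images.
import numpy as np

npop, sigma, alpha = 300, 0.1, 0.02          # assumed hyperparameter values
modify = np.zeros((1, 3, 32, 32))            # low-resolution perturbation seed


def reward_fn(candidates):
    return -np.random.rand(candidates.shape[0])   # placeholder rewards


Nsample = np.random.randn(npop, 3, 32, 32)        # Gaussian search directions
candidates = modify + sigma * Nsample             # npop perturbation candidates
Reward = reward_fn(candidates)
A = (Reward - Reward.mean()) / (Reward.std() + 1e-7)    # normalized rewards
modify = modify + (alpha / (npop * sigma)) * \
    np.dot(Nsample.reshape(npop, -1).T, A).reshape(1, 3, 32, 32)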
Example #4
    def __init__(self):
        self._dataset = robustml.dataset.ImageNet(shape=(299, 299, 3))
        self._threat_model = robustml.threat_model.L2(epsilon=4 / 255)

        args = parser.parse_args()

        tf = transforms.Compose(
            [transforms.Scale([299, 299]),
             transforms.ToTensor()])

        self._mean_torch = autograd.Variable(torch.from_numpy(
            np.array([0.485, 0.456,
                      0.406]).reshape([1, 3, 1, 1]).astype('float32')).cuda(),
                                             volatile=True)
        self._std_torch = autograd.Variable(torch.from_numpy(
            np.array([0.229, 0.224,
                      0.225]).reshape([1, 3, 1, 1]).astype('float32')).cuda(),
                                            volatile=True)
        self._mean_tf = autograd.Variable(torch.from_numpy(
            np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1,
                                               1]).astype('float32')).cuda(),
                                          volatile=True)
        self._std_tf = autograd.Variable(torch.from_numpy(
            np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1,
                                               1]).astype('float32')).cuda(),
                                         volatile=True)

        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

        checkpoint = torch.load('denoise_res_015.ckpt')
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            resmodel.load_state_dict(checkpoint['state_dict'])
        else:
            resmodel.load_state_dict(checkpoint)

        checkpoint = torch.load('denoise_inres_014.ckpt')
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            inresmodel.load_state_dict(checkpoint['state_dict'])
        else:
            inresmodel.load_state_dict(checkpoint)

        checkpoint = torch.load('denoise_incepv3_012.ckpt')
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            incepv3model.load_state_dict(checkpoint['state_dict'])
        else:
            incepv3model.load_state_dict(checkpoint)

        checkpoint = torch.load('denoise_rex_001.ckpt')
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            rexmodel.load_state_dict(checkpoint['state_dict'])
        else:
            rexmodel.load_state_dict(checkpoint)

        if not args.no_gpu:
            inresmodel = inresmodel.cuda()
            resmodel = resmodel.cuda()
            incepv3model = incepv3model.cuda()
            rexmodel = rexmodel.cuda()
        inresmodel.eval()
        resmodel.eval()
        incepv3model.eval()
        rexmodel.eval()

        self._net1 = net1
        self._net2 = net2
        self._net3 = net3
        self._net4 = net4

def defense_denoise_14(input_dir, batch_size, no_gpu):
    print('Running defense: Random_denoise_14')

    if not os.path.exists(input_dir):
        print("Error: Invalid input folder %s" % input_dir)
        exit(-1)

    tf = transforms.Compose(
        [transforms.Resize([299, 299]),
         transforms.ToTensor()])

    with torch.no_grad():
        mean_torch = autograd.Variable(
            torch.from_numpy(
                np.array([0.485, 0.456,
                          0.406]).reshape([1, 3, 1,
                                           1]).astype('float32')).cuda())
        std_torch = autograd.Variable(
            torch.from_numpy(
                np.array([0.229, 0.224,
                          0.225]).reshape([1, 3, 1,
                                           1]).astype('float32')).cuda())
        mean_tf = autograd.Variable(
            torch.from_numpy(
                np.array([0.5, 0.5,
                          0.5]).reshape([1, 3, 1,
                                         1]).astype('float32')).cuda())
        std_tf = autograd.Variable(
            torch.from_numpy(
                np.array([0.5, 0.5,
                          0.5]).reshape([1, 3, 1,
                                         1]).astype('float32')).cuda())

        dataset = Dataset(input_dir, transform=tf)
        loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=False)

        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    final_labels = {}
    outputs = []
    for batch_idx, (input, _) in enumerate(loader):
        if not no_gpu:
            input = input.cuda()
        with torch.no_grad():
            input_var = autograd.Variable(input)
            input_tf = (input_var - mean_tf) / std_tf
            input_torch = (input_var - mean_torch) / std_torch

            labels1 = net1(input_torch, True)[-1]
            # labels2 = net2(input_tf,True)[-1]
            # labels3 = net3(input_tf,True)[-1]
            labels4 = net4(input_torch, True)[-1]

            # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
            labels = (labels1 + labels4).max(1)[1] + 1
        outputs.append(labels.data.cpu().numpy())
    outputs = np.concatenate(outputs, axis=0)

    filenames = dataset.filenames()
    filenames = [os.path.basename(ii) for ii in filenames]
    final_labels.update(dict(zip(filenames, outputs)))

    return final_labels
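Every example repeats the same four checkpoint-loading blocks. A small helper that mirrors that logic exactly (same isinstance check, same load_state_dict calls) would remove most of the duplication:

# Helper equivalent to the repeated checkpoint-loading blocks above.
import torch


def load_checkpoint(model, path):
    checkpoint = torch.load(path)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        model.load_state_dict(checkpoint['state_dict'])
    else:
        model.load_state_dict(checkpoint)
    return model

# usage: load_checkpoint(resmodel, 'denoise_res_015.ckpt')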
Example #6
def main():
    start_time = time.time()  

    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
        
    tf = transforms.Compose([
        transforms.Resize([args.img_size, args.img_size]),
        transforms.ToTensor()
    ])

    tf_flip = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor()
    ])  

    tf_shrink = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize([args.img_size,args.img_size]),
        transforms.ToTensor()
    ])

    with torch.no_grad():
        mean_torch = autograd.Variable(torch.from_numpy(np.array([0.485, 0.456, 0.406]).reshape([1,3,1,1]).astype('float32')).cuda())
        std_torch = autograd.Variable(torch.from_numpy(np.array([0.229, 0.224, 0.225]).reshape([1,3,1,1]).astype('float32')).cuda())
        mean_tf = autograd.Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1,3,1,1]).astype('float32')).cuda())
        std_tf = autograd.Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1,3,1,1]).astype('float32')).cuda())

        dataset = Dataset(args.input_dir, transform=tf)
        loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
    
        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net    
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)
    
    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()


    # inceptionresnetv2 for random padding
    model = inceptionresnetv2(num_classes=1001, pretrained='imagenet+background')
    model = model.cuda()
    model.eval()

    labels_denoise = {}
    labels_random = {}
    denoise_outputs = []
    random_outputs = []
    for batch_idx, (input, _) in enumerate(loader):
        # Random padding
        # bilateral filtering
        temp_numpy = input.data.numpy()
        temp_numpy = np.reshape(temp_numpy, (3, 299, 299))
        temp_numpy = np.moveaxis(temp_numpy, -1, 0)
        temp_numpy = np.moveaxis(temp_numpy, -1, 0)
        temp_numpy = cv2.bilateralFilter(temp_numpy,6,50,50)
        temp_numpy = np.moveaxis(temp_numpy, -1, 0)
        temp_numpy = np.reshape(temp_numpy, (1, 3, 299, 299))
        input00 = torch.from_numpy(temp_numpy)
        length_input, _, _, _ = input.size()
        iter_labels = np.zeros([length_input, 1001, args.itr])
        for j in range(args.itr):
            # random flipping
            input0 = batch_transform(input00, tf_flip, 299)
            # random resizing
            resize_shape_ = random.randint(310, 331)
            image_resize = 331
            tf_rand_resize = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize([resize_shape_, resize_shape_]),
                transforms.ToTensor()
            ]) 
            input1 = batch_transform(input0, tf_rand_resize, resize_shape_)

            # random padding
            shape = [random.randint(0, image_resize - resize_shape_), random.randint(0, image_resize - resize_shape_), image_resize]
            # print(shape)
       
            new_input = padding_layer_iyswim(input1, shape, tf_shrink)
            #print(type(new_input))
            if not args.no_gpu:
                new_input = new_input.cuda()
            with torch.no_grad():
                input_var = autograd.Variable(new_input)
                logits = model(input_var)
                labels = logits.max(1)[1]
                labels_index = labels.data.tolist() 
                print(len(labels_index))
                iter_labels[range(len(iter_labels)), labels_index, j] = 1
        final_labels = np.sum(iter_labels, axis=-1)
        labels = np.argmax(final_labels, 1)
        print(labels)
        random_outputs.append(labels)  
        
        # Denoise
        if not args.no_gpu:
            input = input.cuda()
        with torch.no_grad():
            input_var = autograd.Variable(input)
            input_tf = (input_var-mean_tf)/std_tf
            input_torch = (input_var - mean_torch)/std_torch
        
            labels1 = net1(input_torch,True)[-1]
            # labels2 = net2(input_tf,True)[-1]
            # labels3 = net3(input_tf,True)[-1]
            labels4 = net4(input_torch,True)[-1]

            labels = (labels1+labels4).max(1)[1] + 1  # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
        denoise_outputs.append(labels.data.cpu().numpy())

        
          
    denoise_outputs = np.concatenate(denoise_outputs, axis=0)
    random_outputs = np.concatenate(random_outputs, axis=0)

    filenames = dataset.filenames()
    filenames = [ os.path.basename(ii) for ii in filenames ]
    labels_denoise.update(dict(zip(filenames, denoise_outputs)))
    labels_random.update(dict(zip(filenames, random_outputs)))

    # diff filtering
    print('diff filtering...')
    if (len(labels_denoise) == len(labels_random)):
        # initializing 
        final_labels = labels_denoise
        # Compare
        diff_index = [ii for ii in labels_denoise if labels_random[ii] != labels_denoise[ii]]
        if (len(diff_index) != 0):
            # print(diff_index)
            for index in diff_index:
                final_labels[index] = 0
    else:
        print("Error: Number of labels returned by two defenses doesn't match")
        exit(-1)
    
    elapsed_time = time.time() - start_time
    print('elapsed time: {0:.0f} [s]'.format(elapsed_time))

    with open(args.output_file, 'w') as out_file:
        for filename, label in final_labels.items():
            kmean = auxkmean(64, 10)
            kmean.importmodel()
            kmean_img = args.input_dir + '/' + filename
            kmean_label = kmean.compare(kmean_img,label)
            out_file.write('{0},{1}\n'.format(filename, kmean_label))    

def main():
    start_time = time.time()

    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not args.output_dir:
        print("Error: Please specify an output directory")
        exit(-1)

    trans_forms = transforms.Compose([
        transforms.Resize([299, 299]),
        transforms.ToTensor()
    ])

    with torch.no_grad():
        mean_torch = autograd.Variable(torch.from_numpy(np.array([0.485, 0.456, 0.406]).reshape([1,3,1,1]).astype('float32')).cuda())
        std_torch = autograd.Variable(torch.from_numpy(np.array([0.229, 0.224, 0.225]).reshape([1,3,1,1]).astype('float32')).cuda())
        mean_tf = autograd.Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1,3,1,1]).astype('float32')).cuda())
        std_tf = autograd.Variable(torch.from_numpy(np.array([0.5, 0.5, 0.5]).reshape([1,3,1,1]).astype('float32')).cuda())

        dataset = Dataset(args.input_dir, transform=trans_forms)
        #loader = data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
        loader = data.DataLoader(dataset, batch_size=1, shuffle=False)

        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    outputs = []
    filenames = dataset.filenames()
    all_images_taget_class = load_target_class(args.input_dir)
    #print('filenames = {0}'.format(filenames))
    for batch_idx, (input, _) in enumerate(loader):
        #print('input = {0}'.format(input.data.numpy().shape))
        #print('batch_idx = {0}'.format(batch_idx))
        filenames_batch = get_filenames_batch(filenames, batch_idx, args.batch_size)
        filenames_batch = [n.split(r"/")[-1] for n in filenames_batch]
        print('filenames = {0}'.format(filenames_batch))

        target_class_for_batch = (
            [all_images_taget_class[n] - 1 for n in filenames_batch]
            + [0] * (args.batch_size - len(filenames_batch))) # all_images_taget_class[n] - 1 to match imagenet label 1001 classes
        print('target_class_for_batch = {0}'.format(target_class_for_batch))

        #labels1 = net1(input_torch,True)[-1]
        #labels2 = net2(input_tf,True)[-1]
        #labels3 = net3(input_tf,True)[-1]
        #labels4 = net4(input_torch,True)[-1]
        #labels = (labels1+labels2+labels3+labels4).max(1)[1] + 1  # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
        #print('labels1.shape = ', labels1.data.cpu().numpy().shape) # looks like labels1.data.cpu().numpy can be used as logits
        #print('labels1', labels1.data.cpu().numpy())

        loss = nn.CrossEntropyLoss()
        #label = 924
        step_alpha = 0.01
        eps = args.max_epsilon / 255.0 # input is now in [0, 1]
        target_label = torch.Tensor(target_class_for_batch).long().cuda()
        #print('input.cpu().numpy().amax = {0}'.format(np.amax(input.cpu().numpy()))) #1.0
        #print('input.cpu().numpy().amin = {0}'.format(np.amin(input.cpu().numpy()))) #0.0
        #raise ValueError('hold')
        if not args.no_gpu:
            input = input.cuda()
        input_var = autograd.Variable(input, requires_grad=True)
        orig_images = input.cpu().numpy()
        y = autograd.Variable(target_label)
        for step in range(args.num_iter):

            input_tf = (input_var-mean_tf)/std_tf
            input_torch = (input_var - mean_torch)/std_torch
            #input_tf = autograd.Variable(input_tf, requires_grad=True)
            #input_torch = autograd.Variable(input_torch, requires_grad=True)

            # clear the gradient accumulated on the leaf variable from the
            # previous step before calling backward again
            zero_gradients(input_var)

            out = net1(input_torch,True)[-1]
            out += net2(input_tf,True)[-1]
            out += net3(input_tf,True)[-1]
            out += net4(input_torch,True)[-1]
            pred = out.max(1)[1] + 1
            if step % 10 == 0:
                print('pred = {0}'.format(pred))
            _loss = loss(out, y)
            #_loss = autograd.Variable(_loss)
            _loss.backward()
            #print('type of input = ', type(input_torch))
            #print('type of input.grad = ', type(input_torch.grad))
            normed_grad = step_alpha * torch.sign(input_var.grad.data)
            step_adv = input_var.data - normed_grad
            adv = step_adv - input.data
            adv = torch.clamp(adv, -eps, eps)
            result = input.data + adv
            result = torch.clamp(result, 0, 1.0)
            input_var.data = result

        adv_image = result.cpu().numpy()
        #_ = _get_diff_img(adv_image, orig_images) # check max diff
        save_images(adv_image, get_filenames_batch(filenames, batch_idx, args.batch_size), args.output_dir)

    elapsed_time = time.time() - start_time
    print('elapsed time: {0:.0f} [s]'.format(elapsed_time))
Example #8
def main(_):
    print('Loading denoise...')
    with torch.no_grad():
        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net    
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)
    
    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    inresmodel = inresmodel.cuda()
    resmodel = resmodel.cuda()
    incepv3model = incepv3model.cuda()
    rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    # random padding
    print('Loading random padding...')
    print('Iteration: %d' % FLAGS.itr_time)
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        img_resize_tensor = tf.placeholder(tf.int32, [2])
        x_input_resize = tf.image.resize_images(x_input, img_resize_tensor, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        shape_tensor = tf.placeholder(tf.int32, [3])
        padded_input = padding_layer_iyswim(x_input_resize, shape_tensor)
        # 330 is the last value to keep 8*8 output, 362 is the last value to keep 9*9 output, stride = 32
        padded_input.set_shape(
            (FLAGS.batch_size, FLAGS.image_resize, FLAGS.image_resize, 3))

        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            _, end_points = inception_resnet_v2.inception_resnet_v2(
                padded_input, num_classes=num_classes, is_training=False, create_aux_logits=True)

        predicted_labels = tf.argmax(end_points['Predictions'], 1)

        # Run computation
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            ''' watch the input dir for defense '''
            observer = Observer()
            event_handler = FileEventHandler(batch_shape=batch_shape,
                                             sess=sess,
                                             end_points=end_points,
                                             x_input=x_input,
                                             img_resize_tensor=img_resize_tensor,
                                             shape_tensor=shape_tensor,
                                             output_dir=FLAGS.output_dir,
                                             itr=FLAGS.itr_time,
                                             img_resize=FLAGS.image_resize,
                                             net1=net1,
                                             net4=net4)

            observer.schedule(event_handler, FLAGS.input_dir, recursive=True)
            observer.start()

            print("watchdog start...")

            try:
                while True:
                    time.sleep(0.5)
            except KeyboardInterrupt:
                observer.stop()
            observer.join()

            print("\nwatchdog stopped!")
Example #9
def main():
    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not args.output_file:
        print("Error: Please specify an output file")
        exit(-1)

    tf = transforms.Compose(
        [transforms.Scale([299, 299]),
         transforms.ToTensor()])

    mean_torch = autograd.Variable(
        torch.from_numpy(
            np.array([0.485, 0.456,
                      0.406]).reshape([1, 3, 1, 1]).astype('float32')).cuda())
    std_torch = autograd.Variable(
        torch.from_numpy(
            np.array([0.229, 0.224,
                      0.225]).reshape([1, 3, 1, 1]).astype('float32')).cuda())
    mean_tf = autograd.Variable(
        torch.from_numpy(
            np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1,
                                               1]).astype('float32')).cuda())
    std_tf = autograd.Variable(
        torch.from_numpy(
            np.array([0.5, 0.5, 0.5]).reshape([1, 3, 1,
                                               1]).astype('float32')).cuda())

    dataset = Dataset(args.input_dir, transform=tf)
    loader = data.DataLoader(dataset, batch_size=1, shuffle=False)

    config, resmodel = get_model1()
    config, inresmodel = get_model2()
    config, incepv3model = get_model3()
    config, rexmodel = get_model4()
    net1 = resmodel.net
    net2 = inresmodel.net
    net3 = incepv3model.net
    net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    xent = torch.nn.CrossEntropyLoss()

    filenames = dataset.filenames()
    targets = []
    outputs = []
    for i, (input, _) in enumerate(loader):
        orig = input.numpy()
        print(orig.shape)
        adv = np.copy(orig)
        lower = np.clip(orig - 4.0 / 255.0, 0, 1)
        upper = np.clip(orig + 4.0 / 255.0, 0, 1)
        target_label = np.random.randint(0, 1000)
        targets.append(target_label)
        target = autograd.Variable(
            torch.LongTensor(np.array([target_label - 1])).cuda())
        print('image %d of %d' % (i + 1, len(filenames)))
        for step in range(100):
            # XXX this usually finishes in a very small number of steps, and we
            # could return early in those cases, but I'm too lazy to write the
            # two lines of code it would take to do this
            input_var = autograd.Variable(torch.FloatTensor(adv).cuda(),
                                          requires_grad=True)
            input_tf = (input_var - mean_tf) / std_tf
            input_torch = (input_var - mean_torch) / std_torch

            #clean1 = net1.denoise[0](input_torch)
            #clean2 = net2.denoise[0](input_tf)
            #clean3 = net3.denoise(input_tf)

            #labels1 = net1(clean1,False)[-1]
            #labels2 = net2(clean2,False)[-1]
            #labels3 = net3(clean3,False)[-1]

            labels1 = net1(input_torch, True)[-1]
            labels2 = net2(input_tf, True)[-1]
            labels3 = net3(input_tf, True)[-1]
            labels4 = net4(input_torch, True)[-1]

            labels = (labels1 + labels2 + labels3 + labels4)
            loss = xent(labels, target)
            loss.backward()
            adv = adv - 1.0 / 255.0 * np.sign(
                input_var.grad.data.cpu().numpy())
            adv = np.clip(adv, lower, upper)

            # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
            labels = (labels1 + labels2 + labels3 + labels4).max(1)[1] + 1
            print('  step = %d, loss = %g, target = %d, label = %d' %
                  (step + 1, loss, target_label, labels))
        outputs.append(labels.data.cpu().numpy())
        name = os.path.splitext(os.path.basename(filenames[i]))[0] + '.png'
        out_path = os.path.join(args.output_dir, name)
        scipy.misc.imsave(out_path, np.transpose(adv[0], (1, 2, 0)))

    outputs = np.concatenate(outputs, axis=0)

    with open(args.output_file, 'w') as out_file:
        for filename, target, label in zip(filenames, targets, outputs):
            filename = os.path.basename(filename)
            out_file.write('{0},{1},{2}\n'.format(filename, target, label))
Example #10
def main():
    start_time = time.time()

    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not args.output_file:
        print("Error: Please specify an output file")
        exit(-1)

    tf = transforms.Compose(
        [transforms.Resize([299, 299]),
         transforms.ToTensor()])

    tf_flip = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor()
    ])

    tf_shrink = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize([299, 299]),
        transforms.ToTensor()
    ])

    with torch.no_grad():
        mean_torch = autograd.Variable(
            torch.from_numpy(
                np.array([0.485, 0.456,
                          0.406]).reshape([1, 3, 1,
                                           1]).astype('float32')).cuda())
        std_torch = autograd.Variable(
            torch.from_numpy(
                np.array([0.229, 0.224,
                          0.225]).reshape([1, 3, 1,
                                           1]).astype('float32')).cuda())
        mean_tf = autograd.Variable(
            torch.from_numpy(
                np.array([0.5, 0.5,
                          0.5]).reshape([1, 3, 1,
                                         1]).astype('float32')).cuda())
        std_tf = autograd.Variable(
            torch.from_numpy(
                np.array([0.5, 0.5,
                          0.5]).reshape([1, 3, 1,
                                         1]).astype('float32')).cuda())

        dataset = Dataset(args.input_dir, transform=tf)
        loader = data.DataLoader(dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False)

        config, resmodel = get_model1()
        config, inresmodel = get_model2()
        config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        net2 = inresmodel.net
        net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_inres_014.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        inresmodel.load_state_dict(checkpoint['state_dict'])
    else:
        inresmodel.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_incepv3_012.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        incepv3model.load_state_dict(checkpoint['state_dict'])
    else:
        incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    inresmodel.eval()
    resmodel.eval()
    incepv3model.eval()
    rexmodel.eval()

    outputs = []
    iter = args.iteration
    # print(iter)
    for batch_idx, (input, _) in enumerate(loader):
        # print(input.size())
        length_input, _, _, _ = input.size()
        iter_labels = np.zeros([length_input, 1001, iter])
        for j in range(iter):
            # random flipping
            input0 = batch_transform(input, tf_flip, 299)
            # random resizing
            resize_shape_ = random.randint(310, 331)
            image_resize = 331
            tf_rand_resize = transforms.Compose([
                transforms.ToPILImage(),
                transforms.Resize([resize_shape_, resize_shape_]),
                transforms.ToTensor()
            ])
            input1 = batch_transform(input0, tf_rand_resize, resize_shape_)

            # random padding
            shape = [
                random.randint(0, image_resize - resize_shape_),
                random.randint(0, image_resize - resize_shape_), image_resize
            ]
            # print(shape)

            new_input = padding_layer_iyswim(input1, shape, tf_shrink)
            #print(type(new_input))

            if not args.no_gpu:
                new_input = new_input.cuda()
            with torch.no_grad():
                input_var = autograd.Variable(new_input)
                input_tf = (input_var - mean_tf) / std_tf
                input_torch = (input_var - mean_torch) / std_torch

                labels1 = net1(input_torch, True)[-1]
                labels2 = net2(input_tf, True)[-1]
                labels3 = net3(input_tf, True)[-1]
                labels4 = net4(input_torch, True)[-1]

                # argmax + offset to match Google's Tensorflow + Inception 1001 class ids
                labels = (labels1 + labels2 + labels3 + labels4).max(1)[1] + 1

                labels_index = labels.data.tolist()
                #if (len(labels_index) % args.batch_size != 0):
                #    zeros = [0]* (args.batch_size - len(labels_index) % args.batch_size)
                #    labels_index = labels_index + zeros
                print(len(labels_index))
                #iter_labels[range(len(iter_labels)),m, j] = 1 for m in labels_index
                iter_labels[range(len(iter_labels)), labels_index, j] = 1
        final_labels = np.sum(iter_labels, axis=-1)
        labels = np.argmax(final_labels, 1)
        print(labels)
        outputs.append(labels)
    outputs = np.concatenate(outputs, axis=0)

    with open(args.output_file, 'w') as out_file:
        filenames = dataset.filenames()
        for filename, label in zip(filenames, outputs):
            filename = os.path.basename(filename)
            out_file.write('{0},{1}\n'.format(filename, label))

    elapsed_time = time.time() - start_time
    print('elapsed time: {0:.0f} [s]'.format(elapsed_time))

def main():
    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not os.path.exists(args.output_dir):
        print("Error: Invalid output folder %s" % args.output_dir)
        exit(-1)

    with torch.no_grad():
        config, resmodel = get_model1()
        #config, inresmodel = get_model2()
        #config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        #net2 = inresmodel.net
        #net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    #checkpoint = torch.load('denoise_inres_014.ckpt')
    #if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
    #inresmodel.load_state_dict(checkpoint['state_dict'])
    #else:
    #inresmodel.load_state_dict(checkpoint)

    #checkpoint = torch.load('denoise_incepv3_012.ckpt')
    #if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
    #incepv3model.load_state_dict(checkpoint['state_dict'])
    #else:
    #incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        #inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        #incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    #inresmodel.eval()
    resmodel.eval()
    #incepv3model.eval()
    rexmodel.eval()

    # inceptionresnetv2 for random padding
    model = inceptionresnetv2(num_classes=1001,
                              pretrained='imagenet+background')
    model = model.cuda()
    model.eval()

    # Load kmean
    kmean = auxkmean(64, 10)
    kmean.importmodel()
    ''' watch the input dir for defense '''
    observer = Observer()
    event_handler = FileEventHandler(batch_size=args.batch_size,
                                     input_dir=args.input_dir,
                                     net1=net1,
                                     net4=net4,
                                     model=model,
                                     itr=args.itr,
                                     output_dir=args.output_dir,
                                     no_gpu=args.no_gpu,
                                     kmean=kmean)

    observer.schedule(event_handler, args.input_dir, recursive=True)
    observer.start()

    print("watchdog start...")

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()

    print("\nwatchdog stopped!")