Exemplo n.º 1
0
 def __init__(self, data, args_params):
     """Clone the input graph and record basic node/edge counts.

     data: graph object with `.x` node features and `.edge_index`
     (optionally `.edge_index_train`) — presumably a PyG Data object;
     confirm against the caller.
     """
     self.data = data.clone()
     self.full_graph = data.clone()
     # Node count = number of rows of the node-feature matrix x.
     self.num_nodes = self.data.x.size()[0]
     # Edge count: prefer the train-split edge index when present.
     self.num_edges = (
         self.data.edge_index_train[0].shape[0]
         if hasattr(self.data, "edge_index_train")
         else self.data.edge_index[0].shape[0]
     )
Exemplo n.º 2
0
    def get_batch(self):
        """Build the per-frame test batch for the current roidb entry.

        Patches the entry's 'image' path from its frame pattern and
        `self.cur_frameid`, constructs a single-image batch, caches the
        frame as the key frame when `key_frameid == cur_frameid`, and
        stores the result in `self.data` / `self.im_info`.

        key_frame_flag: 0 = first key frame, 1 = later key frame,
        2 = non-key frame.
        """

        def get_rpn_testbatch(roidbs, cfg):
            # Build one test sample: image tensor (3, H, W), empty label
            # dict, im_info of shape (3,), dummy gt_boxes, num_boxes = 0.
            data = []
            label = {}
            im_info = []

            blobs = get_minibatch(roidbs, self._num_classes)
            data = torch.from_numpy(blobs['data'])
            im_info = torch.from_numpy(blobs['im_info'])

            data_height, data_width = data.size(1), data.size(2)

            # NHWC -> CHW, then drop the batch dimension (batch size is 1).
            data = data.permute(0, 3, 1,
                                2).contiguous().view(3, data_height,
                                                     data_width)
            im_info = im_info.view(3)

            # Placeholder ground-truth boxes: testing has no annotations.
            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            num_boxes = 0

            return data, label, im_info, gt_boxes, num_boxes

        # self.cur_roidb_index = index
        cur_roidb = self._roidb[self.cur_roidb_index].copy()
        # 'pattern' is a %-format template for the frame's image path.
        cur_roidb['image'] = cur_roidb['pattern'] % self.cur_frameid
        self.cur_seg_len = cur_roidb['frame_seg_len']
        # print('frame_seg_len', cur_roidb['frame_seg_len'], 'image path', cur_roidb['image'])
        data, label, im_info, gt_boxes, num_boxes = get_rpn_testbatch(
            [cur_roidb], self.cfg)
        if self.key_frameid == self.cur_frameid:  # key frame
            # self.data_key = data[0]['data'].copy()
            self.data_key = data.clone()
            if self.key_frameid == 0:
                self.key_frame_flag = 0
            else:
                self.key_frame_flag = 1
        else:
            self.key_frame_flag = 2
        # extend_data = [{'data': data[0]['data'],
        #                 'im_info': data[0]['im_info'],
        # 'feat_key' is a zero placeholder; the real key-frame feature is
        # presumably filled in by the consumer — confirm against caller.
        extend_data = [{
            'data':
            data,
            'im_info':
            im_info,
            'gt_boxes':
            gt_boxes,
            'num_boxes':
            num_boxes,
            'data_key':
            self.data_key,
            'feat_key':
            torch.from_numpy(np.zeros(
                (1, self.cfg.network.APN_FEAT_DIM, 1, 1)))
        }]
        # print('len(data)', len(data))
        # self.data = [[extend_data[i][name] for name in self.data_name] for i in range(len(data))]
        # Keep only the fields listed in self.data_name, as a 1-element list.
        self.data = [{name: extend_data[i][name]
                      for name in self.data_name} for i in range(1)]
        self.im_info = im_info
Exemplo n.º 3
0
def poison(data, mode='train'):
    """Stamp an args.d x args.d checkerboard trigger at offset (25, 25).

    Returns a poisoned clone of `data`; the input is left untouched.
    NOTE(review): for 'gtsrb' the `black` variable receives the normalised
    value of pixel 1 and `white` that of pixel -1 — looks swapped; confirm
    this inversion is intentional before changing it.
    """
    black = -1
    white = 1
    if TARGET_DATASET_NAME == 'gtsrb':
        # Normalised levels for the gtsrb mean/std (0.31 / 0.28).
        black = (1 - 0.31) / 0.28
        white = (-1 - 0.31) / 0.28

    poisoned = data.clone()

    for sample in range(data.shape[0]):
        for row in range(args.d):
            for col in range(args.d):
                # Alternate the two levels on a checkerboard pattern.
                level = white if (row + col) % 2 == 1 else black
                poisoned[sample, 25 + row, 25 + col] = level

    '''
    # if args.p and args.s:
        # _data = torch.cat((data, poison_data), dim=0)
        # _label = torch.cat((label, poison_label), dim=0)
    # elif (args.p and not args.s) or mode == 'test':
        # _data = poison_data
        # _label = poison_label
    '''
    return poisoned
Exemplo n.º 4
0
def generate_training_data(data, mask):
    """Build a (masked-input, full-target) TensorDataset plus its normaliser.

    `data` is a stack of 2-D samples; `mask` is multiplied into every sample
    of the input copy while the targets keep the unmasked values.  Both are
    reshaped to (N, 1, H, -1) and normalised with the *masked* data's
    mean/std.  Returns (dataset, norm_data) where norm_data(x) normalises,
    norm_data(x, back=True) inverts, and getmu/getsigma expose the stats.
    """
    masked = data.clone()
    targets = masked.clone()  # cloned before masking: targets stay complete

    # Occlude every sample of the input copy.
    for sample_idx in range(masked.size(0)):
        masked[sample_idx, :, :] = masked[sample_idx, :, :] * mask

    n, h = masked.size(0), masked.size(1)
    targets = targets.view(n, 1, h, -1)
    masked = masked.view(n, 1, h, -1)

    # Normalisation statistics come from the masked inputs.
    mu = masked.mean()
    sigma = masked.std()

    def norm_data(x, back=False, getmu=False, getsigma=False):
        """Normalise x, invert with back=True, or expose mu/sigma."""
        if getmu:
            return mu
        if getsigma:
            return sigma
        return (x * sigma) + mu if back else (x - mu) / sigma

    dataset = torch.utils.data.TensorDataset(norm_data(masked),
                                             norm_data(targets))
    return dataset, norm_data
Exemplo n.º 5
0
    def __init__(self, data, args_params):
        """Clone the graph, build the adjacency, and precompute train nodes.

        data: graph with `.x`, `.train_mask`, and `.edge_index`
        (optionally `.edge_index_train`) — presumably a PyG Data object.
        args_params: dict; only "sample_coverage" is read here.
        """
        self.data = data.clone()
        self.full_graph = data.clone()
        # Node count = rows of the node-feature matrix x.
        self.num_nodes = self.data.x.size()[0]
        # Edge count from the train split when available.
        self.num_edges = (self.data.edge_index_train[0].shape[0] if hasattr(
            self.data, "edge_index_train") else
                          self.data.edge_index[0].shape[0])

        self.gen_adj()

        self.train_mask = self.data.train_mask.cpu().numpy()
        # 1-based node ids zeroed where the mask is 0, then filtered and
        # shifted back to 0-based indices of the training nodes.
        self.node_train = np.arange(1, self.num_nodes + 1) * self.train_mask
        self.node_train = self.node_train[self.node_train != 0] - 1

        self.sample_coverage = args_params["sample_coverage"]
        self.preprocess()
Exemplo n.º 6
0
def occludeimg_with_center(data, center):
    """Return a copy of `data` with channel 0 zeroed inside the given box.

    center is (top, bot, left, right); the input tensor is not modified.
    """
    t, b, l, r = center
    out = data.clone()
    out[0, t:b, l:r] = 0.0
    return out
Exemplo n.º 7
0
def masking_noise(data, frac):
    """Return a copy of `data` with each unit independently zeroed.

    data: Tensor
    frac: fraction of units to mask out (each unit is zeroed when an
    independent uniform draw falls below `frac`); the input is untouched.
    """
    corrupted = data.clone()
    drop_mask = torch.rand(data.size()) < frac
    corrupted[drop_mask] = 0
    return corrupted
def cutmix(data, target, alpha):
    """CutMix augmentation: paste a random box from permuted samples.

    lam is drawn from Beta(alpha, alpha) and clipped to [0.3, 0.4] so the
    pasted box covers a moderate area.  Returns (mixed_data, target) — the
    shuffled targets and the pixel-exact lam are computed in the original
    but never returned, so only the mixed images and original labels leave.
    """
    perm = torch.randperm(data.size(0))

    lam = np.clip(np.random.beta(alpha, alpha), 0.3, 0.4)
    bbx1, bby1, bbx2, bby2 = rand_bbox(data.size(), lam)

    mixed = data.clone()
    mixed[:, :, bby1:bby2, bbx1:bbx2] = data[perm, :, bby1:bby2, bbx1:bbx2]
    # Adjust lambda to the exact pixel ratio (currently unused by callers).
    lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (data.size()[-1] * data.size()[-2]))

    return mixed, target
Exemplo n.º 9
0
def compute(data):
    """Forward `data` through the globally-defined dense network.

    Uses module-level weight list W, bias list B, layer sizes net_shape and
    activations net_f (0 is the sentinel for "no activation").  Returns the
    first output column of the final layer.
    """
    global W, B

    activations = data.clone()
    for layer in range(len(net_shape) - 1):
        activations = torch.matmul(activations, W[layer]) + B[layer]
        # Apply this layer's nonlinearity unless it is the 0 sentinel.
        if net_f[layer] != 0:
            activations = net_f[layer](activations)

    #result = torch.prod(buffer, dim=1)
    return activations[:, 0]
Exemplo n.º 10
0
def occludeimg_and_returncenter(data):
    """Zero a square patch centred on a random nonzero pixel of channel 0.

    Uses the module-level `half_occludesize` for the patch half-width and
    clamps the box to [0, 27] (28x28 images assumed — confirm with caller).
    Returns (occluded_copy, (top, bot, left, right)).
    """
    out = data.clone()
    candidates = torch.nonzero(out[0])
    pick = candidates[random.randint(0, len(candidates) - 1)]
    row, col = pick[0], pick[1]
    top = max(0, row - half_occludesize)
    bot = min(27, row + half_occludesize)
    left = max(0, col - half_occludesize)
    right = min(27, col + half_occludesize)
    out[0, top:bot, left:right] = 0.0
    return out, (top, bot, left, right)
Exemplo n.º 11
0
def trans_RGB_bicubic(data):
    """Bicubic down- then up-scale every image by a factor of args.upscale.

    `data` is a (N, 3, H, W) float tensor in [0, 1]; each image is converted
    to a PIL RGB image, resized to H//upscale and back to H, and written
    into a fresh tensor of the original spatial size.
    """
    scale = args.upscale
    ims_np = (data.clone()*255.).permute(0, 2, 3, 1).numpy().astype(np.uint8)

    hr_size = ims_np.shape[1]
    lr_size = hr_size // scale

    out = data.new().resize_(data.size(0), 3, hr_size, hr_size).zero_()

    for i, arr in enumerate(ims_np):
        pil = Image.fromarray(arr, 'RGB')
        downscaled = Resize((lr_size, lr_size), Image.BICUBIC)(pil)
        upscaled = Resize((hr_size, hr_size), Image.BICUBIC)(downscaled)
        out[i].copy_(ToTensor()(upscaled))
    return out
Exemplo n.º 12
0
def generate_adv_images(model, device, kwargs):
    """Generate targeted adversarial images via iterated FGSM on MNIST-style data.

    For each target class 0..9, perturbs batches of other-class images toward
    the target with per-class step sizes (epsilonarr) and iteration counts
    (iterNum), ranks candidates by their minimum L2 distance S to the clean
    dataset, saves per-class previews to adv_imgs_<label>/, and keeps the 10
    best high-confidence (softmax > 0.8) images per class.

    Returns (adv_images, image_names, targeted_class_labels).

    Fixes vs. the original: the image-name f-string used the stale inner-loop
    index `idx` instead of `idx_advList`, recording the wrong softmax entry;
    dead commented-out code and unused locals were removed.
    NOTE(review): batch_size=100 assumes each class folder holds exactly 100
    samples, and the np.tile(..., [1000, 1, 1]) assumes 1000 clean images.
    """
    model.eval()

    mean = (0.1307,)
    std = (0.3081,)

    with torch.enable_grad():

        path = 'D:/MComp/CS5260 Deep Learning and NN-2/Assignment_1/Assignment_1/data/'
        # Hardcoding batch size to load the classes one by one.
        dataLoader = torch.utils.data.DataLoader(
            torchvision.datasets.DatasetFolder(
                path,
                loader=numpy_loader,
                extensions='.npy',
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Normalize(mean, std)])),
            batch_size=100, **kwargs)

        # Collect the whole clean dataset as one numpy array for S scoring.
        given_dataset = []
        for data1, target1 in dataLoader:
            data1, target1 = data1.to(device), target1.to(device)
            if len(given_dataset) == 0:
                given_dataset = data1.squeeze().detach().cpu().numpy()
            else:
                given_dataset = np.concatenate(
                    [given_dataset, data1.squeeze().detach().cpu().numpy()],
                    axis=0)

        # Per-class attack schedules (tuned by hand).
        iterNum = [60, 100, 50, 84, 50, 70, 70, 82, 50, 60]
        epsilonarr = [0.81, 0.65, 0.8, 0.6, 0.8, 0.81, 0.8, 0.81, 1.0, 0.81]

        for i in range(1):

            adv_images = []
            targeted_class_labels = []
            image_names = []
            print("Epsilon Value: " + str(epsilonarr))
            print("Iteration number : " + str(iterNum))

            targetID = 0
            totalSoftmaxList = []
            maxIndex = []
            Sval = 0
            for labelCount in range(10):
                adv_imagesList = torch.cuda.FloatTensor()
                softMaxList = []

                for data_f, target_f in dataLoader:
                    data, target = data_f.to(device), target_f.to(device)
                    targetCopy = torch.LongTensor(100,).zero_().to(device)
                    originalLabel = target[0].item()
                    # Skip batches already of the target class, or classes
                    # whose epsilon has been zeroed out.
                    if (originalLabel == labelCount) | (epsilonarr[labelCount] == 0):
                        continue
                    data.requires_grad = True

                    targetCopy += targetID

                    # Freeze the model; only the input image gets gradients.
                    for name, param in model.named_parameters():
                        param.requires_grad = False
                    maxim = (1.0 - mean[0]) / std[0]
                    minim = (0.0 - mean[0]) / std[0]

                    def clip(data_tensor, minimum, maximum):
                        # Clamp to the valid normalised pixel range and
                        # re-wrap so gradients keep flowing next iteration.
                        return torch.autograd.Variable(
                            torch.clamp(data_tensor, min=minimum, max=maximum),
                            requires_grad=True)

                    data_norm = torch.autograd.Variable(data.clone(),
                                                        requires_grad=True)
                    iter = 0
                    # Targeted FGSM: step *against* the gradient of the loss
                    # w.r.t. the target class.
                    while iter <= iterNum[labelCount]:
                        model.zero_grad()
                        data_norm.grad = None
                        output = model.forward(data_norm)
                        outSoftMax = F.softmax(output, dim=1)
                        loss = F.cross_entropy(outSoftMax, targetCopy)
                        loss.backward()
                        gradient = data_norm.grad.data.sign()
                        data_norm.data = data_norm.data - epsilonarr[labelCount] * gradient
                        data_norm = clip(data_norm, minim, maxim)
                        iter += 1

                    with torch.no_grad():
                        output = model(data_norm)
                    outSoftMax = F.softmax(output, dim=1)
                    out_max = outSoftMax.max(axis=1)
                    out_arg_max = out_max[1].cpu().numpy()

                    # S score: min L2 distance of each adversarial image to
                    # any clean image (lower = more similar to real data).
                    Sarray = []
                    adv_images1 = data_norm.squeeze().detach().cpu().numpy()
                    for k in range(len(adv_images1)):
                        image = adv_images1[k, :, :]
                        Sarray.append(np.min(np.sqrt(np.sum(np.square(np.subtract(
                            given_dataset,
                            np.tile(np.expand_dims(image, axis=0), [1000, 1, 1]))),
                            axis=(1, 2)))))

                    sSortIndex = np.asarray(Sarray).argsort()

                    # Keep up to 10 successfully-misclassified images per
                    # batch, preferring the largest S (most distinct).
                    image_iter = 0
                    idx = 99
                    while image_iter < 10 and idx >= 0:
                        if out_arg_max[sSortIndex[idx].item()].item() == labelCount:
                            adv_imagesList = torch.cat(
                                [adv_imagesList, data_norm[sSortIndex[idx].item()]],
                                dim=0)
                            softMaxList.append(out_max[0][sSortIndex[idx].item()].item())
                            image_iter += 1
                        idx -= 1

                remove_files_in_dir(os.path.join(os.getcwd(), f'adv_imgs_{labelCount}'))

                # Re-score the pooled candidates with a fresh forward pass.
                adv_imagesList4dim = adv_imagesList[:, None, :, :]
                with torch.no_grad():
                    outputAdvList = model(adv_imagesList4dim)
                outSoftMaxAdvList = F.softmax(outputAdvList, dim=1)
                outAdvMax = outSoftMaxAdvList.max(axis=1)
                outAdvArgMax = outAdvMax[1].cpu().numpy()

                # Save every candidate as a preview image.
                for idx, img in enumerate(adv_imagesList):
                    img_t = (img.detach().cpu().numpy() * std[0] + mean[0]) * 255
                    im = Image.fromarray(img_t.reshape(28, 28).astype('uint8'), mode='L')
                    im.save(f'adv_imgs_{labelCount}/i_{idx}_{outAdvMax[0][idx].item()}_{outAdvArgMax[idx].item()}.jpg')

                # Final S scores over the pooled candidates.
                FinalSarray = []
                Finaladv_images1 = adv_imagesList4dim.squeeze().detach().cpu().numpy()
                for k in range(len(Finaladv_images1)):
                    Finalimage = Finaladv_images1[k, :, :]
                    FinalSarray.append(np.min(np.sqrt(
                        np.sum(np.square(np.subtract(
                            given_dataset,
                            np.tile(np.expand_dims(Finalimage, axis=0), [1000, 1, 1]))),
                               axis=(1, 2)))))

                FinalsSortIndex = np.asarray(FinalSarray).argsort()

                # Pick the 10 highest-S candidates that fool the model with
                # softmax confidence above 0.8.
                image_iter_advlist = 0
                idx_advList = len(adv_imagesList4dim) - 1
                SvalIndividual = 0
                while image_iter_advlist < 10 and idx_advList >= 0:
                    if (outAdvArgMax[FinalsSortIndex[idx_advList].item()].item() == labelCount) & (outAdvMax[0][FinalsSortIndex[idx_advList].item()].item() > 0.8):
                        adv_images.append(adv_imagesList4dim[FinalsSortIndex[idx_advList]])
                        totalSoftmaxList.append(outAdvMax[0][FinalsSortIndex[idx_advList].item()].item())
                        maxIndex.append(outAdvMax[1][FinalsSortIndex[idx_advList].item()].item())
                        Sval = Sval + FinalSarray[FinalsSortIndex[idx_advList].item()]
                        SvalIndividual = SvalIndividual + FinalSarray[FinalsSortIndex[idx_advList].item()]
                        targeted_class_labels.append(labelCount)
                        # BUG FIX: was FinalsSortIndex[idx] (stale index from
                        # the per-batch loop) in the last f-string field.
                        image_names.append(
                            f'sasi_{image_iter_advlist}_{outAdvArgMax[FinalsSortIndex[idx_advList].item()].item()}_{outSoftMaxAdvList[FinalsSortIndex[idx_advList].item()][outAdvArgMax[FinalsSortIndex[idx_advList].item()].item()].item()}')
                        image_iter_advlist += 1
                    idx_advList -= 1
                print(SvalIndividual / 10)
                targetID += 1
            print("S Value: " + str(Sval / len(maxIndex)))
            if len(adv_images) < 100:
                print("Not enough images generated")
            iterNum = [i + 1 for i in iterNum]

    return adv_images, image_names, targeted_class_labels
Exemplo n.º 13
0
def generate_hole(data):
    """Return a copy of `data` with the cube [250:300]^3 set to -1.

    The -1 values mark the "hole" region; the input tensor is not modified.
    """
    holed = data.clone()
    holed[250:300, 250:300, 250:300] = -1
    #pixels = [i for i, x in enumerate(hdata) if x ==-1]
    return holed  # {"hdata":hdata, "pixels": pixels}
Exemplo n.º 14
0
    def evaluate(self):
        """Evaluate the model against targeted adversarial attacks.

        Iterates self.val_loader[1] (already-normalised data), re-applies the
        image transforms to each sample, attacks toward a random wrong class,
        and accumulates adversarial loss/accuracy.

        NOTE(review): the standard-accuracy pass was commented out (removed
        here as dead code), so std_loss/std_corr are never updated yet are
        still read in the final summary — confirm Accumulator.avg is defined
        for an empty accumulator, or drop the std_* entries.
        """
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        seen_classes = []
        adv_images = Accumulator('adv_images')
        first_batch_images = Accumulator('first_batch_images')

        for batch_idx, (data, target) in enumerate(self.val_loader[1]):

            # data is normalized at this point

            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)

            # Draw a random *incorrect* class for every sample: uniform over
            # the other nb_classes - 1 labels via the remainder trick.
            rand_target = torch.randint(0,
                                        self.nb_classes - 1,
                                        target.size(),
                                        dtype=target.dtype,
                                        device='cuda')
            rand_target = torch.remainder(target + rand_target + 1,
                                          self.nb_classes)

            data_cpy = data.clone().detach()

            # Un-normalise each image, re-apply the pixel-space transforms,
            # then re-normalise into data_cpy for the attack.
            for idx in range(len(data_cpy)):
                unnormalized = reverse_normalization(data[idx])

                transformed = applyTransforms(
                    np.swapaxes(
                        np.array(unnormalized.cpu().clone().detach()) * 255.0,
                        0, 2))
                data_cpy[idx] = transforms.functional.normalize(
                    transformed.clone().cpu(), IMAGENET_MEAN,
                    IMAGENET_STD).cuda()

            data_adv = self.attack(self.model,
                                   data_cpy,
                                   rand_target,
                                   avoid_target=False,
                                   scale_eps=False)

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                loss = F.cross_entropy(output_adv, target,
                                       reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {'adv_loss': adv_loss.avg, 'adv_acc': adv_corr.avg}
            print('Adv Batch', batch_idx)
            print(run_output)

        summary_dict = {
            'std_acc': std_corr.avg.item(),
            'adv_acc': adv_corr.avg.item()
        }
        print(std_loss.avg, std_corr.avg, adv_loss.avg, adv_corr.avg)
Exemplo n.º 15
0
def train(model, train_loader, valid_loader, num_epochs=5, learning_rate=1e-4):
    """Train a denoising autoencoder with MSE reconstruction loss.

    One categorical feature is zeroed out on a clone of each batch and the
    model must reconstruct the full input.  Tracks per-epoch train/val loss
    and accuracy, then plots both curves.

    Bug fixed: the batch counters previously started at 1 *and* were
    incremented once per batch, so epoch losses divided by (num_batches + 1)
    and were systematically under-reported; they now count batches exactly.
    """

    start_time = time.time()
    print("Training Started...")

    torch.manual_seed(42)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    # Store loss and accuracy in lists for each epoch
    train_acc, val_acc, train_loss, val_loss = [], [], [], []

    for epoch in range(num_epochs):

        total_train_loss = 0.0
        total_val_loss = 0.0
        n = 0  # training batches seen this epoch
        i = 0  # validation batches seen this epoch

        for data in train_loader:

            if use_cuda and torch.cuda.is_available():
                data = data.cuda()  # send data to cuda

            # zero out one categorical feature on a cloned item
            datam = zero_out_random_feature(data.clone())
            recon = model(datam)  # pass through model
            loss = criterion(recon, data)  # compare ground truth with prediction
            loss.backward()  # back propagation
            optimizer.step()  # update weights
            optimizer.zero_grad()  # zero gradient
            total_train_loss += loss.item()
            n += 1

        # max(n, 1) guards against an empty loader
        train_loss.append(total_train_loss / max(n, 1))

        for data in valid_loader:
            if use_cuda and torch.cuda.is_available():
                data = data.cuda()  # send data to cuda

            datam = zero_out_random_feature(data.clone())
            recon = model(datam)
            loss = criterion(recon, data)
            total_val_loss += loss.item()
            i += 1

        val_loss.append(total_val_loss / max(i, 1))

        # tracking accuracy
        train_acc.append(get_accuracy(model, train_loader))
        val_acc.append(get_accuracy(model, valid_loader))

        print(epoch, train_loss[-1], val_loss[-1], train_acc[-1], val_acc[-1])

    n = len(train_acc)

    print('Finished Training')
    elapsed_time = time.time() - start_time
    print("Total time elapsed: {:.2f} seconds".format(elapsed_time))

    # plot loss
    plt.title("Epoch vs Loss")
    plt.plot(range(0, n), train_loss, label='Train')
    plt.plot(range(0, n), val_loss, label='Validation')
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend(loc='upper right')
    plt.show()
    print("Final Train Loss: {}".format(train_loss[-1]))
    print("Final Validation Loss: {}".format(val_loss[-1]))

    # plot accuracy
    plt.title("Epoch vs Accuracy")
    plt.plot(range(0, n), train_acc, label='Train')
    plt.plot(range(0, n), val_acc, label='Validation')
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.legend(loc='lower right')
    plt.show()
    print("Final Train Accuracy: {}".format(train_acc[-1]))
    print("Final Validation Accuracy: {}".format(val_acc[-1]))
Exemplo n.º 16
0
def convert_to_binary(data: torch.FloatTensor, threshold: float = 3):
    """Map a float tensor onto {-1, 0, 1} labels.

    Entries equal to 0 become -1 (unobserved), entries strictly between
    0 and ``threshold`` become 0, and entries >= ``threshold`` become 1.
    Any other entries (e.g. negatives) keep their original value.

    Args:
        data: input tensor; it is not modified.
        threshold: cutoff separating the 0 and 1 buckets.

    Returns:
        A new tensor of the same shape as ``data``.
    """
    rtn = data.clone()
    rtn[data == 0.] = -1.
    # Direct boolean-mask form of the original zip-of-masks construction.
    rtn[(data > 0.) & (data < threshold)] = 0.
    rtn[data >= threshold] = 1.
    return rtn
Exemplo n.º 17
0
 def __init__(self, data, args_params):
     """Store a cloned copy of the graph and cache its basic sizes.

     Presumably ``data`` is a PyG-style graph object exposing ``x`` and
     ``edge_index`` tensors -- confirm against the caller.
     """
     self.data = data.clone()
     self.num_nodes = self.data.x.size()[0]
     # Prefer the dedicated training split of edges when one exists.
     if hasattr(self.data, "edge_index_train"):
         self.num_edges = self.data.edge_index_train.size()[1]
     else:
         self.num_edges = self.data.edge_index.size()[1]
Exemplo n.º 18
0
def reduce_tensor(data, world_size):
    """Average ``data`` across all distributed workers.

    A clone is reduced in place so the caller's tensor is left untouched.

    Args:
        data: tensor to average (same shape on every rank).
        world_size: number of participating processes.

    Returns:
        The element-wise mean of ``data`` over all ranks.
    """
    reduced = data.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    reduced /= world_size
    return reduced
Exemplo n.º 19
0
def save_image(filename, data):
    """Write a CHW tensor (values expected in [0, 255]) to ``filename``."""
    array = data.clone().clamp(0, 255).numpy()
    # CHW float -> HWC uint8, the layout PIL expects.
    array = array.transpose(1, 2, 0).astype("uint8")
    Image.fromarray(array).save(filename)
Exemplo n.º 20
0
def main(args):
    """Evaluate a ProtoPNet checkpoint on clean and adversarial images.

    Loads the YAML experiment config and the checkpoint selected by ``args``,
    builds a dataloader over the attack split, runs the configured attack on
    every batch, and prints per-batch and final clean/adversarial accuracy
    for the FR branch.
    """

    with open("./saved_models/{}/{}/{}/{}/{}".format(args.dataset,
                                                     args.backbone, args.net,
                                                     args.mode,
                                                     args.config)) as f:
        cfg = yaml.safe_load(f)

    # ---------------------------------------------Prepare configuration for experiment -------------------------
    NET_ARGS = cfg['NET_ARGS']
    DATA_ARGS = cfg['DATA_ARGS']
    EXP_ARGS = cfg['EXP_ARGS']
    attack_dir = DATA_ARGS[
        'attack_test_dir'] if args.split == 'test' else DATA_ARGS[
            'attack_train_dir']

    # --------------------------------------------- Prepare the dataset------------------------------------------
    attack_dataset = datasets.ImageFolder(
        attack_dir,
        transforms.Compose([
            transforms.Resize(size=(DATA_ARGS['img_size'],
                                    DATA_ARGS['img_size'])),
            transforms.ToTensor(),
        ]))

    data_loader = torch.utils.data.DataLoader(
        attack_dataset,
        batch_size=EXP_ARGS['train_push_batch_size'],
        shuffle=False,
        num_workers=4,
        pin_memory=False,
        sampler=None)
    total_batches = len(data_loader)

    # --------------------------------------------- Prepare model -----------------------------------------------
    modelpath = './saved_models/{}/{}/{}/{}/{}'.format(args.dataset,
                                                       args.backbone, args.net,
                                                       args.mode,
                                                       args.checkpoint)

    # NOTE: load via state_dict rather than a pickled full model object --
    # full-object checkpoints break when the code is refactored into other
    # folders.  Don't use torch.save() with the full object in the future.

    # -------------------  Construct model and load state_dict --------------------------------------------------

    ppnet = model_Proto.construct_PPNet(
        base_architecture=NET_ARGS['base_architecture'],
        pretrained=False,
        img_size=DATA_ARGS['img_size'],
        prototype_shape=NET_ARGS['prototype_shape'],
        num_classes=DATA_ARGS['num_classes'],
        prototype_activation_function=NET_ARGS[
            'prototype_activation_function'],
        add_on_layers_type=NET_ARGS['add_on_layers_type'],
    )

    ppnet.load_state_dict(torch.load(modelpath))

    ppnet_multi = torch.nn.DataParallel(ppnet)
    ppnet_multi.eval()

    print("\nModel path is {}".format(modelpath))

    # Running counters for accuracy (clean and adversarial).
    correct_fr = 0
    num_samples = 0
    adv_correct_fr = 0

    criterion = torch.nn.CrossEntropyLoss().cuda()

    # --------------------------------------------- Prepare attack ----------------------------------------------

    attack_params = get_attack_params(args.attack)
    print(
        "Attack type: {}, Epsilon: {:.5f}, Alpha: {:.5f},  Iters: {}\n".format(
            attack_params['TYPE'], attack_params['EPS'],
            attack_params['ALPHA'], attack_params['ITERS']))

    # --------------------------------------------- Run attack --------------------------------------------------

    for it, (data, target) in enumerate(data_loader):

        data, target = data.cuda(), target.cuda()
        num_samples += data.shape[0]

        # ------------------------- predictions on clean image --------------------------------------------------
        output_fr = ppnet_multi(normalize_fn(data.clone().detach()))[0]

        pred_fr = output_fr.data.max(1, keepdim=True)[1]

        correct_fr += pred_fr.eq(target.data.view_as(pred_fr)).cpu().sum()

        # -------------------------prepare adversarial image-------------------------
        adv_img = attack_fns_Proto(ppnet_multi,
                                   criterion,
                                   data,
                                   target,
                                   eps=attack_params['EPS'],
                                   alpha=attack_params['ALPHA'],
                                   attack_type=attack_params['TYPE'],
                                   iters=attack_params['ITERS'],
                                   normalize_fn=normalize_fn)

        # -------------------------predictions on adversarial image----------------------------------------------
        output_adv_fr = ppnet_multi(normalize_fn(adv_img.clone().detach()))[0]

        adv_correct_fr += torch.sum(output_adv_fr.argmax(
            dim=-1) == target).item()

        # -----------------------------Print Stats---------------------------------------------------------------
        print("Batch: [{}/{}]\t FR_branch Normal: {:.2f}%\t"
              "FR_branch Adv: {: .2f}%, ".format(
                  it, total_batches, (100.0 * float(correct_fr) / num_samples),
                  (100.0 * float(adv_correct_fr) / num_samples)))
        # Free per-batch tensors promptly to reduce GPU memory pressure.
        del data, target, output_fr, output_adv_fr

    print("Final \t  FR_branch Normal: {:.2f}%\t"
          "FR_branch Adv: {:.2f}%\n".format(
              (100.0 * float(correct_fr) / num_samples),
              (100.0 * float(adv_correct_fr) / num_samples)))
Exemplo n.º 21
0
    def evaluate(self):
        """Evaluate the model on standard and adversarial validation loaders.

        Pass 1 (``self.val_loader[0]``): plain forward pass, accumulating
        standard loss/accuracy.  Pass 2 (``self.val_loader[1]``): each batch
        is unnormalized, re-transformed via ``applyTransforms``, renormalized,
        then attacked toward a random wrong class before being scored.
        Prints running averages per batch and a final summary line.
        """
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        for batch_idx, (data, target) in enumerate(self.val_loader[0]):
            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            with torch.no_grad():
                # std_cpy is used for finding the standard accuracy and has
                # transforms applied as normal upstream.
                std_cpy = data.clone().detach()
                output = self.model(std_cpy)
                std_logits.update(output.cpu())
                loss = F.cross_entropy(output, target, reduction='none').cpu()
                std_loss.update(loss)
                corr = correct(output, target)
                corr = corr.view(corr.size()[0]).cpu()
                std_corr.update(corr)

            run_output = {'std_loss': std_loss.avg, 'std_acc': std_corr.avg}
            print('Standard Batch', batch_idx)
            print(run_output)

        for batch_idx, (data, target) in enumerate(self.val_loader[1]):

            # data is normalized at this point

            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)

            # Draw a target class guaranteed to differ from the true label.
            rand_target = torch.randint(0,
                                        self.nb_classes - 1,
                                        target.size(),
                                        dtype=target.dtype,
                                        device='cuda')
            rand_target = torch.remainder(target + rand_target + 1,
                                          self.nb_classes)

            data_cpy = data.clone().detach()

            # Unnormalize -> augment in uint8 HWC space -> renormalize.
            for idx in range(len(data_cpy)):
                unnormalized = reverse_normalization(data[idx])
                transformed = applyTransforms(
                    np.swapaxes(
                        np.array(unnormalized.cpu().clone().detach()) * 255.0,
                        0, 2))
                data_cpy[idx] = transforms.functional.normalize(
                    transformed.clone().cpu(), IMAGENET_MEAN,
                    IMAGENET_STD).cuda()

            data_adv = self.attack(self.model,
                                   data_cpy,
                                   rand_target,
                                   avoid_target=False,
                                   scale_eps=False)

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                # Loss/accuracy are computed against the TRUE target, not the
                # random attack target.
                loss = F.cross_entropy(output_adv, target,
                                       reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {'adv_loss': adv_loss.avg, 'adv_acc': adv_corr.avg}
            print('Adv Batch', batch_idx)
            print(run_output)

        print(std_loss.avg, std_corr.avg, adv_loss.avg, adv_corr.avg)
Exemplo n.º 22
0
    def evaluate(self):
        """Evaluate the model on standard and adversarial validation loaders.

        Pass 1 (``self.val_loader[0]``): plain forward pass, accumulating
        standard loss/accuracy.  Pass 2 (``self.val_loader[1]``): each batch
        is attacked toward a random wrong class, then scored against the
        true labels.  Prints running averages per batch and a final summary.
        """
        self.model.eval()

        std_loss = Accumulator('std_loss')
        adv_loss = Accumulator('adv_loss')
        std_corr = Accumulator('std_corr')
        adv_corr = Accumulator('adv_corr')
        std_logits = Accumulator('std_logits')
        adv_logits = Accumulator('adv_logits')

        for batch_idx, (data, target) in enumerate(self.val_loader[0]):
            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
            with torch.no_grad():
                std_cpy = data.clone().detach()
                output = self.model(std_cpy)
                std_logits.update(output.cpu())
                loss = F.cross_entropy(output, target, reduction='none').cpu()
                std_loss.update(loss)
                corr = correct(output, target)
                corr = corr.view(corr.size()[0]).cpu()
                std_corr.update(corr)

            run_output = {'std_loss':std_loss.avg,
                          'std_acc':std_corr.avg}
            print('Standard Batch', batch_idx)
            print(run_output)

        for batch_idx, (data, target) in enumerate(self.val_loader[1]):

            # data is normalized at this point

            if self.cuda:
                data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)

            # Draw a target class guaranteed to differ from the true label.
            rand_target = torch.randint(
                0, self.nb_classes - 1, target.size(),
                dtype=target.dtype, device='cuda')
            rand_target = torch.remainder(target + rand_target + 1, self.nb_classes)

            data_adv = self.attack(self.model, data, rand_target,
                                   avoid_target=False, scale_eps=False)

            with torch.no_grad():
                output_adv = self.model(data_adv)
                adv_logits.update(output_adv.cpu())
                # Loss/accuracy use the TRUE target, not the attack target.
                loss = F.cross_entropy(output_adv, target, reduction='none').cpu()
                adv_loss.update(loss)
                corr = correct(output_adv, target)
                corr = corr.view(corr.size()[0]).cpu()
                adv_corr.update(corr)

            run_output = {'adv_loss':adv_loss.avg,
                          'adv_acc':adv_corr.avg}
            print('Adv Batch', batch_idx)
            print(run_output)

        print(std_loss.avg, std_corr.avg, adv_loss.avg, adv_corr.avg)
Exemplo n.º 23
0
def run_experiment(netG,
                   dataloader_test,
                   nn_path,
                   opt,
                   optimize_red_first=False,
                   n_bfgs_iter=50,
                   lbfgs_lr=0.05,
                   num_lbfgs_trials=3):
    """
    Optimize over the latent noise to try to reconstruct a target image.

    For every test batch, runs ``num_lbfgs_trials`` LBFGS restarts, keeps the
    best reconstruction per image, finds its nearest training-set neighbor,
    saves side-by-side comparison images, and aggregates L2 / noise
    log-likelihood statistics, which are saved and printed at the end.
    """
    # read the training set to get images for nearest neighbors
    search_images, search_image_names = nearest_neighbors.read_search_images(
        search_dataset=nn_path,
        classes_of_interest=opt.class_names,
        num_channels=opt.original_nc,
        use_cuda=opt.cuda)
    # prepare data loader
    data_looper = GanImageloader()
    iterator_data = data_looper.return_iterator(dataloader_test,
                                                opt.cuda,
                                                opt.nc,
                                                return_labels=False,
                                                num_passes=1)

    # the generator in the eval mode
    netG.eval()

    # create output folders; os.makedirs avoids shelling out via os.system
    for cl in opt.class_names:
        os.makedirs(os.path.join(opt.experiment, cl), exist_ok=True)

    l2_dists = {cl: [] for cl in opt.class_names}
    lls_noise = {cl: [] for cl in opt.class_names}
    lls_noise_init = {cl: [] for cl in opt.class_names}
    nn_dists = {cl: [] for cl in opt.class_names}

    # standard normal prior over the latent noise (first arg = mean,
    # second arg = covariance)
    mvn = multivariate_normal(np.zeros(opt.nz), np.identity(opt.nz))

    def compute_rec_error(data, rec):
        """Root-mean-square error between an image and its reconstruction."""
        rec_error = data - rec
        l2_dist = torch.sum(rec_error**2 / rec_error.numel())**0.5
        return l2_dist

    def im_to_01(im):
        """Map an image from normalized space back to [0, 1]."""
        return im * opt.std_val + opt.mean_val

    def im_to_original(im):
        """Inverse of ``im_to_01``: map a [0, 1] image to normalized space."""
        return (im - opt.mean_val) / opt.std_val

    for i_batch, data in enumerate(iterator_data):
        print('Batch {}'.format(i_batch))
        # NOTE(review): the class is hard-coded here even though the stats
        # dicts cover all of opt.class_names -- confirm before generalizing.
        class_name = 'neutrophils'

        reconstructions_best = data.clone()
        reconstructions_best.data.zero_()
        reconstructions_best_init = data.clone()
        reconstructions_best_init.data.zero_()
        reconstructions_error_best = [float('inf')] * data.size(0)
        ll_noise_best = [float('inf')] * data.size(0)
        ll_noise_init_best = [float('inf')] * data.size(0)
        nn_dists_batch = [float('inf')] * data.size(0)

        for i_trial in range(num_lbfgs_trials):
            print('Class {0}: {1}, trial {2} of {3}'.format(
                1, class_name, i_trial + 1, num_lbfgs_trials))
            sys.stdout.flush()
            # get the noise leading to the good reconstruction; netG itself
            # is the forward callable (no wrapper lambda needed)
            if optimize_red_first:
                noise_init, noise = reconstruct_cells_red_first(
                    data,
                    netG,
                    opt,
                    n_bfgs_iter=n_bfgs_iter,
                    lbfgs_lr=lbfgs_lr)
            else:
                print('Optimize red first else')
                noise_init, noise = reconstruct_cells(data,
                                                      netG,
                                                      opt,
                                                      n_bfgs_iter=n_bfgs_iter,
                                                      lbfgs_lr=lbfgs_lr)

            # get reconstructions
            reconstructions_init = netG(noise_init)
            reconstructions = netG(noise)

            # compute reconstruction errors and keep the per-image best trial
            for i_im in range(reconstructions.size(0)):
                # get (negative) log-likelihoods of the noise vectors
                noise_np = noise[i_im].view(-1).data.cpu().numpy()
                ll_noise = -mvn.logpdf(noise_np)
                noise_init_np = noise_init[i_im].view(-1).data.cpu().numpy()
                ll_noise_init = -mvn.logpdf(noise_init_np)

                l2_dist = compute_rec_error(
                    im_to_01(data[i_im].data),
                    im_to_01(reconstructions[i_im].data))

                if l2_dist < reconstructions_error_best[i_im]:
                    reconstructions_error_best[i_im] = l2_dist
                    reconstructions_best[i_im] = reconstructions[i_im]
                    reconstructions_best_init[i_im] = reconstructions_init[
                        i_im]
                    ll_noise_best[i_im] = ll_noise
                    ll_noise_init_best[i_im] = ll_noise_init

        # find nearest neighbors from the training set
        neighbors = torch.FloatTensor(reconstructions_best.size())
        if opt.cuda:
            neighbors = neighbors.cuda()
        for i_im in range(reconstructions_best.size(0)):
            ref_im = data[i_im].data
            ref_im_01 = im_to_01(ref_im)
            _, nn_ids = nearest_neighbors.find_neighbors(
                ref_im_01,
                search_images[class_name],
                search_image_names[class_name],
                num_neighbors=1)
            nn_im_01 = search_images[class_name][nn_ids[0]]
            neighbors[i_im] = im_to_original(nn_im_01)
            nn_dists_batch[i_im] = compute_rec_error(nn_im_01, ref_im_01)
        neighbors = Variable(neighbors)

        # save results: original | best rec | best init rec | nearest neighbor
        for i_im in range(reconstructions_best.size(0)):
            all_images = [
                data[i_im], reconstructions_best[i_im],
                reconstructions_best_init[i_im], neighbors[i_im]
            ]
            all_images = torch.stack(all_images, 0)
            all_images = pad_channels(all_images.data, 1)
            file_name = os.path.join(
                opt.experiment, class_name,
                '{0}_batch{1}_image{2}.png'.format(class_name, i_batch, i_im))
            vutils.save_image(im_to_01(all_images), file_name)

            l2_dist = reconstructions_error_best[i_im]
            ll_noise = ll_noise_best[i_im]
            ll_noise_init = ll_noise_init_best[i_im]

            l2_dists[class_name].append(l2_dist)
            lls_noise[class_name].append(ll_noise)
            lls_noise_init[class_name].append(ll_noise_init)
            nn_dists[class_name].append(nn_dists_batch[i_im])

    # saving the full reconstruction data
    all_data = {
        'l2_dists': l2_dists,
        'lls_noise': lls_noise,
        'lls_noise_init': lls_noise_init,
        'nn_dists': nn_dists
    }
    print('Saving pth....')
    torch.save(all_data, os.path.join(opt.experiment,
                                      'reconstruction_data.pth'))

    # print aggregated statistics per class
    for i_class, class_name in enumerate(opt.class_names):
        l2 = np.array(l2_dists[class_name])
        l2_mean = np.mean(l2)
        l2_std = np.std(l2)
        ll_noise = np.array(lls_noise[class_name])
        ll_noise_mean = np.mean(ll_noise)
        ll_noise_std = np.std(ll_noise)
        ll_noise_init = np.array(lls_noise_init[class_name])
        ll_noise_init_mean = np.mean(ll_noise_init)
        ll_noise_init_std = np.std(ll_noise_init)
        nn_d = np.array(nn_dists[class_name])
        nn_d_mean = np.mean(nn_d)
        nn_d_std = np.std(nn_d)
        print(
            'Class {0}: L2-reconstr mean {1:0.3f} std {2:0.3f}; L2-noise mean {3:0.3f} std {4:0.3f}; L2-noise-init mean {5:0.3f} std {6:0.3f}; NN dist {7:0.3f} std {8:0.3f}'
            .format(class_name, l2_mean, l2_std, ll_noise_mean, ll_noise_std,
                    ll_noise_init_mean, ll_noise_init_std, nn_d_mean,
                    nn_d_std))

    # print aggregated statistics over all classes
    l2 = np.concatenate([np.array(d) for d in l2_dists.values()])
    l2_mean = np.mean(l2)
    l2_std = np.std(l2)
    ll_noise = np.concatenate([np.array(d) for d in lls_noise.values()])
    ll_noise_mean = np.mean(ll_noise)
    ll_noise_std = np.std(ll_noise)
    ll_noise_init = np.concatenate(
        [np.array(d) for d in lls_noise_init.values()])
    ll_noise_init_mean = np.mean(ll_noise_init)
    ll_noise_init_std = np.std(ll_noise_init)
    nn_d = np.concatenate([np.array(d) for d in nn_dists.values()])
    nn_d_mean = np.mean(nn_d)
    nn_d_std = np.std(nn_d)

    print(
        'All classes: L2-reconstr mean {0:0.3f} std {1:0.3f}; L2-noise mean {2:0.3f} std {3:0.3f}; L2-noise-init mean {4:0.3f} std {5:0.3f}; NN dist {6:0.3f} std {7:0.3f}'
        .format(l2_mean, l2_std, ll_noise_mean, ll_noise_std,
                ll_noise_init_mean, ll_noise_init_std, nn_d_mean, nn_d_std))
Exemplo n.º 24
0
 def __init__(self, data, args_params):
     """Clone the input graph and cache node and edge counts.

     Presumably ``data`` is a PyG-style graph with ``x`` (num_nodes x F)
     and ``edge_index`` (2 x num_edges) -- confirm against the caller.
     """
     self.data = data.clone()
     self.num_nodes = self.data.x.shape[0]
     self.num_edges = self.data.edge_index.shape[1]
Exemplo n.º 25
0
    def __getitem__(self, idx):
        """Assemble one training batch from the per-dataset iterators.

        Supports several modes driven by ``self.args``: combining all
        datasets into one batch (``combine == 'inbatch'``), appending
        duplicated patches (``duplicates``), and appending anti-augmented
        (slightly translated) copies (``antiaug``).  Raises StopIteration
        once the dataset-index sequence is exhausted.
        """
        if idx == 0 and self.fixed_batch:
            printc.red('FIXED BATCH:', self.fixed_batch)
        if self.fixed_batch:
            idx = 0
        if idx >= len(self.ds_idx_seq):
            # 'it' instead of 'iter' to avoid shadowing the builtin
            for it in set(self.iters):  # dataloaders
                it.__del__()
            raise StopIteration
        ds_idx = self.ds_idx_seq[idx]

        if self.args.combine in ['inbatch']:  ### destroy this later?
            if idx * len(self.datasets) >= len(self.ds_idx_seq):
                for it in set(self.iters):  # dataloaders
                    it.__del__()
                raise StopIteration

            # pull one sample from every dataset and concatenate
            aux = []
            for loader_iter in self.iters:
                sample = next(loader_iter)
                aux += [sample]

            info = {}
            info['labels'] = torch.cat([c['labels'] for c in aux])
            data = torch.cat([c['data'] for c in aux]).float().cuda()
            return data, info

        info = {}
        loader = self.loaders[ds_idx]
        sample = next(self.iters[ds_idx])
        info['loader'] = 'other'
        if isinstance(self.loaders[ds_idx].ds_info['dataset'],
                      DS_AMOS):  # only AMOS provides info
            if hasattr(loader, 'block_sizes'):
                # consume one pre-computed block size per batch
                info['block_sizes'] = loader.block_sizes[0]
                loader.block_sizes = loader.block_sizes[1:]
            info['PS_idxs'] = sample['PS_idxs']
            info['set_idxs'] = sample['set_idxs']
            info['loader'] = 'AMOS'
            info['sigmas'] = self.sigmas[ds_idx][info['PS_idxs']]

        info['labels'] = sample['labels']
        info['ds_idx'] = ds_idx
        data = sample['data'].float().cuda()

        if self.args.duplicates > 0:
            dups = self.args.duplicates
            info['labels'] = None
            # repeat the trailing 2*dups patches at the end of the batch
            dup_data = data[-2 * dups:]
            data = torch.cat([data, dup_data])
        if self.args.antiaug:
            info['labels'] = None

            # lazily build the pad/crop pipelines once per instance
            if not hasattr(self, 'antiaug_tr_pad'):
                self.antiaug_tr_pad = nn.Sequential(
                    torch.nn.ReplicationPad2d(20)).cuda()
            if not hasattr(self, 'antiaug_tr_crop'):
                self.antiaug_tr_crop = nn.Sequential(
                    kornia.augmentation.CenterCrop(64)).cuda()

            padded = data.clone()
            padded = self.antiaug_tr_pad(padded)
            # even positions are anchors, odd positions are positives
            As = padded[0::2]
            Ps = padded[1::2]

            B, CH, H, W = As.shape
            aff_dict = KF.rg.random_affine_generator(B,
                                                     H,
                                                     W,
                                                     degrees=(0.0, 0.0),
                                                     translate=(0.03, 0.03),
                                                     scale=(1.0, 1.0),
                                                     shear=(0.0, 0.0),
                                                     same_on_batch=False)
            aff_params = {}
            for k, v in aff_dict.items():
                aff_params[k] = v
                if k == 'translate':
                    # push translations away from zero by a fixed margin
                    aff_params[k] = v.to(
                        v.device) + 0.03 * torch.sign(v).to(v.device)
            antips = torch.zeros(*padded.shape).cuda()
            antips[0::2] = KF.apply_affine(As, aff_params)
            antips[1::2] = KF.apply_affine(Ps, aff_params)

            antips = self.antiaug_tr_crop(antips)
            data = torch.cat([data, antips])
        if self.args.K:
            data = {'data': data, 'loader': info['loader']}
        return data, info
Exemplo n.º 26
0
def train(model, train_loader, valid_loader, num_epochs=5, learning_rate=1e-4):
    """Train a reconstruction model and plot accuracy/loss curves.

    For each batch, one categorical feature is zeroed out and the model is
    trained (MSE loss) to reconstruct the original input.

    Args:
        model: network to optimise in place.
        train_loader: iterable yielding training input tensors.
        valid_loader: iterable yielding validation input tensors.
        num_epochs: number of passes over the training data.
        learning_rate: Adam learning rate.
    """
    torch.manual_seed(42)
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    train_acc, val_acc, epochs, val_loss, train_loss = [], [], [], [], []

    for epoch in range(num_epochs):
        n_batches = 0
        total_loss = 0
        for data in train_loader:
            datam = zero_out_random_feature(data.clone())  # zero out one categorical feature
            recon = model(datam)
            loss = criterion(recon, data)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            total_loss += loss.item()
            n_batches += 1

        # Bug fix: average over the actual batch count (was total/(n+1),
        # an off-by-one); max(..., 1) guards an empty loader.
        train_loss.append(float(total_loss) / max(n_batches, 1))

        trainacc = get_accuracy(model, train_loader)
        validacc = get_accuracy(model, valid_loader)
        print("training acc: ", trainacc)
        print("validation acc: ", validacc)
        train_acc.append(trainacc)
        val_acc.append(validacc)
        epochs.append(epoch)

        n_batches = 0
        total_loss = 0
        with torch.no_grad():  # no gradients needed for validation
            for data in valid_loader:
                datam = zero_out_random_feature(data.clone())  # zero out one categorical feature
                recon = model(datam)
                loss = criterion(recon, data)

                total_loss += loss.item()
                n_batches += 1

        val_loss.append(float(total_loss) / max(n_batches, 1))

    plt.title("Training Accuracy")
    plt.plot(epochs, train_acc, label="Train")
    plt.xlabel("epoch")
    plt.ylabel("Training Accuracy")
    plt.show()

    plt.title("Validation Accuracy")
    # Bug fix: plot val_acc here (the original plotted train_acc again).
    plt.plot(epochs, val_acc, label="Validation")
    plt.xlabel("epoch")
    plt.ylabel("Validation Accuracy")
    plt.show()

    # Plotting Loss
    plt.title("Train vs Validation Loss")
    plt.plot(epochs, train_loss, label="Train")
    plt.plot(epochs, val_loss, label="Validation")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend(loc='best')
    plt.show()