Example #1
def generate_adv_images(mean, std, model, device, data_loader, kwargs):
    adv_images = []
    targeted_class_labels = []
    image_names = []
    # your code to generate adv_images goes here

    adv_images_all = []
    targeted_class_labels_all = []
    image_names_all = []

    cost = []
    given_dataset = []
    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            if len(given_dataset) == 0:
                given_dataset = data.squeeze().detach().cpu().numpy()
            else:
                given_dataset = np.concatenate(
                    [given_dataset,
                     data.squeeze().detach().cpu().numpy()],
                    axis=0)

    for data, target in data_loader:
        data, target = data.to(device), target.to(device)
        max_it = 100
        for k in range(data.shape[0]):
            image_ = data[k, :, :]

            softmax_t, r, loop_i, label_orig, label_pert, pert_image = deepfool(
                image=image_, net=model.eval(), max_iter=max_it)

            if softmax_t > 0.8:
                adv_images_all.append(pert_image)
                targeted_class_labels_all.append(label_pert)
                image_names_all.append(str(k) + "_" + str(label_orig))
                cost.append(calculate_cost(pert_image, given_dataset))
                # for t, m, s in zip(pert_image, mean, std):
                #     t.mul_(s).add_(m)
                # pert_image = clip_tensor(pert_image, 0, 1).squeeze().detach().cpu().numpy()
                # pert_image = pert_image.squeeze().detach().cpu().numpy()
                # pert_image = 255.0 * pert_image
                #
                # fpath = "adv_images/" + str(label_pert) + "/" + str(k)
                # np.save(fpath, pert_image)
            # if adv_images.__len__() >= 10:
            #     break
            print(k)

    save_data("adv_images_all", adv_images_all, image_names_all,
              targeted_class_labels_all, std, mean)

    batch_size = len(adv_images_all)
    # batch_size = 857

    adv_images, image_names, targeted_class_labels = select_adv_images(
        mean, std, device, data_loader, batch_size, kwargs)

    return adv_images, image_names, targeted_class_labels
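The helpers deepfool, calculate_cost, save_data, and numpy_loader are defined elsewhere in the scaffold. As a rough, non-authoritative sketch, calculate_cost could mirror the minimum-distance computation of eq3 in Example #10 (whether the /100 scaling there belongs inside the helper is an assumption):

# Hypothetical sketch of the calculate_cost helper used above; it mirrors
# the minimum L2 distance computed by eq3 in Example #10.
import numpy as np

def calculate_cost(pert_image, given_dataset):
    # Flatten the perturbed image and measure its Euclidean distance to
    # every image in the given dataset, returning the smallest distance.
    image = pert_image.squeeze().detach().cpu().numpy()
    diffs = given_dataset.reshape(len(given_dataset), -1) - image.reshape(1, -1)
    return np.sqrt(np.square(diffs).sum(axis=1)).min()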
Example #2
File: problems.py Project: peria1/trainer
    def get_input_and_target(self):
        data_half, data_full, num_half, num_full, data, which_digit = self.MNST_data(
        )

        xs = data.size()
        self.npts = xs[2] * xs[3]

        return data.squeeze().view(-1, self.npts), data.squeeze().view(
            -1, self.npts)
Example #3
def select_adv_images(mean, std, device, data_loader, batch_size, kwargs):
    adv_images_sel = []
    targeted_class_labels_sel = []
    image_names_sel = []

    adv_data_loader_all = torch.utils.data.DataLoader(
        torchvision.datasets.DatasetFolder(
            'adv_images_all',  # Change this to your adv_images folder
            loader=numpy_loader,
            extensions='.npy',
            transform=transforms.Compose(
                [transforms.ToTensor(),
                 transforms.Normalize(mean, std)])),
        batch_size=batch_size,
        **kwargs)

    given_dataset = []
    adv_images = []
    labels = []

    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            if len(given_dataset) == 0:
                given_dataset = data.squeeze().detach().cpu().numpy()
            else:
                given_dataset = np.concatenate(
                    [given_dataset,
                     data.squeeze().detach().cpu().numpy()],
                    axis=0)

        for data, target in adv_data_loader_all:
            data, target = data.to(device), target.to(device)
            adv_images = data
            labels = target

    for i in range(10):
        label_indices = np.where(labels == i)[0]
        a_i = adv_images[label_indices, :, :]

        cost_i = []
        for k in range(len(a_i)):
            image = a_i[k, :, :]
            cost_k = calculate_cost(image, given_dataset)
            cost_i.append(cost_k)

        ind = np.argpartition(cost_i, -10)[-10:]
        # ind = np.argpartition(cost_i, 10)[:10]
        selected_images_i = a_i[ind, :, :]

        adv_images_sel.extend(selected_images_i)
        targeted_class_labels_sel.extend(np.array([i] * 10))
        image_names_sel.extend(
            np.array(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']))

    return adv_images_sel, image_names_sel, targeted_class_labels_sel
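For reference, the np.argpartition call above selects the indices of the ten largest costs in linear time (the commented-out variant would select the ten smallest). A tiny self-contained check:

# Illustration of the argpartition trick used above: indices of the ten
# largest values, returned in no particular order.
import numpy as np

cost_i = np.random.rand(857)
ind = np.argpartition(cost_i, -10)[-10:]
assert set(cost_i[ind]) == set(np.sort(cost_i)[-10:])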
Example #4
File: train.py Project: SAI990323/CueWord
    def forward(self, input, h):
        '''
        :param input: the vectors corresponding to the query
        :param h: the ids of the cue_word in the query
        :return: the softmax result of cue_word selection and the LSTM hidden-layer states
        '''
        h_t = torch.zeros(input.size(0), self.hidden_size,
                          dtype=input.dtype).to(device)
        c_t = torch.zeros(input.size(0), self.hidden_size,
                          dtype=input.dtype).to(device)
        h_t2 = torch.zeros(input.size(0), self.hidden_size,
                           dtype=input.dtype).to(device)
        c_t2 = torch.zeros(input.size(0), self.hidden_size,
                           dtype=input.dtype).to(device)
        zeros = torch.zeros(input.size(0), self.input_size,
                            dtype=input.dtype).to(device)
        #output, (h_t,c_t) = self.encoder(input, (h_t,c_t)) #encoder
        for i, data in enumerate(input.chunk(input.size(1), dim=1)):
            with torch.autograd.set_detect_anomaly(True):
                data = data.to(device)
                data = data.squeeze(1)
                l1_input = torch.cat((data, h_t), dim=1)
                (h_t, c_t) = self.encoder1(l1_input, (h_t, c_t))
                l2_input = torch.cat([zeros, h_t, h_t2], dim=1)
                (h_t2, c_t2) = self.encoder2(l2_input, (h_t2, c_t2))
        topic_tracker = torch.zeros(input.size(0),
                                    self.hidden_size).to(device).scatter(
                                        1, h, 1)  #need to add
        MLP_input = torch.cat((h_t, topic_tracker), dim=1)
        MLP_output = self.layer1(MLP_input)
        MLP_output = self.layer2(MLP_output)

        return torch.softmax(MLP_output, dim=0), (h_t, c_t), (h_t2, c_t2)
Example #5
def run_epoch(train, hp):
    loader = train_loader if train else test_loader
    losses = []
    test_count = 0

    for batch_idx, data in enumerate(loader):
        if args.cuda:
            data = data.cuda()

        # change (batch, time, x) to (time, batch, x)
        data = Variable(data.squeeze().transpose(0, 1))

        batch_loss = model(data, hp)

        if train:
            optimizer.zero_grad()
            total_loss = batch_loss
            total_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
        
        losses.append(batch_loss.item())
        
        '''
        if batch_idx % 100 == 0:
            sample_and_draw(batch_idx)
            print(batch_loss.data.cpu().numpy()[0])
        
        test_count += 1
        if test_count > 2:
            break
        '''

    return np.mean(losses)
Example #6
def run_epoch(train, hp):
    loader = train_loader if train else test_loader
    losses = {}

    for batch_idx, (data, macro_goals) in enumerate(loader):
        if args.cuda:
            data, macro_goals = data.cuda(), macro_goals.cuda()

        # change (batch, time, x) to (time, batch, x)
        data = Variable(data.squeeze().transpose(0, 1))
        macro_goals = Variable(macro_goals.squeeze().transpose(0, 1))

        batch_losses = model(data, macro_goals, hp)

        if train:
            optimizer.zero_grad()
            total_loss = sum(batch_losses.values())
            total_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()

        for key in batch_losses:
            if batch_idx == 0:
                losses[key] = batch_losses[key].data[0]
            else:
                losses[key] += batch_losses[key].data[0]

    for key in losses:
        losses[key] /= len(loader.dataset)

    return losses
Example #7
File: train.py Project: SAI990323/CueWord
 def forward(self, input, h, hh):
     '''
     :param input: the decoder input, which is in fact the cue_word embedding
     :param h: hidden-layer features h
     :param hh: hidden-layer features c
     :return: the output utterance
     '''
     (h_t, c_t) = h
     (h_t2, c_t2) = hh
     input = self.layer(input)
     decoder_input = input
     output_sentence = torch.zeros(input.size(0),
                                   22,
                                   dict_size,
                                   dtype=input.dtype).to(device)
     zeros = torch.zeros(input.size(0), self.input_size,
                         dtype=input.dtype).to(device)
     for i, data in enumerate(
             decoder_input.chunk(decoder_input.size(1), dim=1)):
         data = data.to(device)
         data = data.squeeze(1)
         l1_input = torch.cat([data, h_t], dim=1)
         (h_t, c_t) = self.decoder1(l1_input, (h_t, c_t))
         l2_input = torch.cat([zeros, h_t2, h_t], dim=1)
         (h_t2, c_t2) = self.decoder2(l2_input, (h_t2, c_t2))
         output_sentence[:, i, :] = torch.softmax(self.linear(h_t2), dim=0)
         for j in range(input.size(0)):
             zeros[j] = torch.FloatTensor(word2vec[id2dict[int(
                 torch.argmax(output_sentence[j, i, :]))]])
     return output_sentence
Example #8
    def get_test_batch(self, indices):
        # num of batches: len(indices)
        data = next(self.dataset_iter)
        data = data.squeeze(0).numpy().astype(self.input_data_type) / 255.0
        data = [data[i - self.seq_len:i + self.horizon] for i in indices]
        data = np.stack(data, axis=0)

        return data
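The list comprehension slices one overlapping window of length seq_len + horizon ending at each anchor index. A minimal illustration, with made-up values for seq_len, horizon, and the indices:

import numpy as np

seq_len, horizon = 3, 2
data = np.arange(20)                      # stand-in for one long sequence
indices = [5, 10]
windows = np.stack([data[i - seq_len:i + horizon] for i in indices], axis=0)
print(windows)  # [[ 2  3  4  5  6] [ 7  8  9 10 11]]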
Example #9
File: test.py Project: maortiz1/SSR_deepp
    def test_all(self):
        self.model.eval()

        loss_ts = []
        psnr_ts = []
        ssim_ts = []
        self.scores = []
        self.targets = []
        self.data = []

        for batch_idx, (data, target) in tqdm.tqdm(
                enumerate(self.loader_test),
                total=len(self.loader_test),
                ncols=80,
                leave=False):

            if self.cuda:
                data, target = data.to(self.device), target.to(self.device)
            score = self.model(data)
            loss = self.loss(score, target)
            loss_ts.append(loss.item())
            for k in range(score.shape[0]):
                d = data[k]
                t = target[k]
                s = score[k]

                p, s = self.metrics(t.squeeze(), s.squeeze())
                psnr_ts.append(p)
                ssim_ts.append(s)

            d = data.squeeze().permute(1, 2, 0)
            d_cpu = d.cpu().data.numpy()
            t = target.squeeze().permute(1, 2, 0)
            t_cpu = t.cpu().data.numpy()
            s = score.squeeze().permute(1, 2, 0)
            s_cpu = s.cpu().data.numpy()
            if self.best_psnr < p:
                self.best_psnr = p
                self.best_t = t_cpu
                self.best_d = d_cpu
                self.best_s = s_cpu

            self.data.append(d_cpu)
            self.scores.append(s_cpu)
            self.targets.append(t_cpu)

        mean_psnr = np.mean(psnr_ts)
        mean_ssim = np.mean(ssim_ts)
        mean_loss = np.mean(loss_ts)

        print('\n Validation PSNR: ', mean_psnr)
        print('\n Validation SSIM: ', mean_ssim)
        print('\n Validation Loss: ', mean_loss)
Example #10
def eq3(image, data_loader):
    device = torch.device("cpu")
    given_dataset = []
    for data, target in data_loader:
        data, target = data.to(device), target.to(device)
        if len(given_dataset) == 0:
            given_dataset = data.squeeze().detach().cpu().numpy()
        else:
            given_dataset = np.concatenate(
                [given_dataset,
                 data.squeeze().detach().cpu().numpy()], axis=0)
    given_dataset = given_dataset.reshape(-1, 28, 28)

    S = np.min(
        np.sqrt(
            np.sum(np.square(
                np.subtract(
                    given_dataset,
                    np.tile(np.expand_dims(image, axis=0), [1000, 1, 1]))),
                   axis=(1, 2))))
    return S / 100
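The nested expression computes the minimum Euclidean distance from image to the 1000 dataset images. An equivalent, flatter formulation (same shapes assumed; the np.tile is unnecessary because broadcasting handles it):

# Sketch: given_dataset has shape (1000, 28, 28) and image (28, 28).
diffs = (given_dataset - image[None, :, :]).reshape(1000, -1)
S = np.linalg.norm(diffs, axis=1).min()  # then return S / 100 as above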
Example #11
    def __getitem__(self, index):
        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index]]
        blobs = get_minibatch(minibatch_db, self._num_classes)

        # print(blobs)
        # import IPython; IPython.embed()
        # assert(1 == 0)

        data = torch.from_numpy(blobs['data'])
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to random shuffle the bounding box.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            try:
                gt_boxes = torch.from_numpy(blobs['gt_boxes'])
            except:
                print('gt_boxes error')
                import IPython
                IPython.embed()

            im_info[0, 0] = data.size(1)
            im_info[0, 1] = data.size(2)

            gt_boxes_padding = torch.FloatTensor(self.max_num_box,
                                                 gt_boxes.size(1)).zero_()
            num_boxes = min(gt_boxes.size(0), self.max_num_box)
            gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes]

            # permute trim_data to adapt to downstream processing
            try:
                data = data.transpose(2, 3).transpose(1, 2).contiguous()
            except:
                import IPython
                IPython.embed()
            im_info = im_info.view(3)

            return data.squeeze(0), im_info, gt_boxes_padding, num_boxes
        else:
            data = data.permute(0, 3, 1,
                                2).contiguous().view(3, data_height,
                                                     data_width)
            im_info = im_info.view(3)

            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            num_boxes = 0

            return data, im_info, gt_boxes, num_boxes
Example #12
def train(loader, model, optimizer, epochs=100):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(device)
    for epoch in range(epochs):
        train_loss = 0
        for batch_idx, (data, target) in enumerate(loader):

            data = data.squeeze(1)
            data = (data / 255).to(device)
            outs = model(data)
            loss = loss_funct(outs, data)
            model.zero_grad()
            loss.backward()
            _ = torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
            optimizer.step()
            print(loss)
Example #13
 def transform_inputs(loader, batch_size=batch_size):
     encoded_inputs = []
     labels = []
     tq = tqdm(loader)
     with torch.no_grad():
         for batch_idx, (data, label) in enumerate(tq):
             data = Variable(data.squeeze().transpose(0, 1))
             data = (data - data.min().item()) / (data.max().item() -
                                                  data.min().item())
             h = model.predict(data)
             for i in range(h.shape[1]):
                 encoded_inputs.append(h[:, i, :].flatten().numpy())
                 labels.append(label[i].item())
     return torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
         torch.Tensor(encoded_inputs), torch.Tensor(labels)),
                                        batch_size=batch_size,
                                        shuffle=True)
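Here model.predict and the default batch_size come from the surrounding script. A hypothetical call would wrap an existing loader:

# Hypothetical usage, assuming `loader` yields (sequence, label) batches
# shaped as the snippet above expects.
encoded_loader = transform_inputs(loader, batch_size=64)
for batch, labels in encoded_loader:
    print(batch.shape, labels.shape)
    break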
Example #14
def train(params, key_order):
    torch.set_num_threads(1)
    model = Net()
    start = 0
    state_dict = model.state_dict()
    for k in key_order:
        v = state_dict[k]
        length = np.prod(v.shape)
        state_dict[k] = torch.FloatTensor(np.reshape(params[start:start+length], v.shape))
        start += length
    model.load_state_dict(state_dict)
    for data, target in train_loader:
        data = torch.cat(torch.unbind(data.squeeze(), 1),1)
        data, target = Variable(data), Variable(target)
        output = model(data)
        loss = F.cross_entropy(output, target).data[0]
        return loss
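train expects params as one flat vector covering every tensor named in key_order. A hedged sketch of the inverse operation, producing such a vector from an existing model (this helper is not part of the original snippet):

import numpy as np

def flatten_params(model, key_order):
    # Inverse of the unpacking loop in train(): concatenate every
    # state_dict tensor, in key_order, into one flat vector.
    state_dict = model.state_dict()
    return np.concatenate(
        [state_dict[k].cpu().numpy().ravel() for k in key_order])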
Example #15
def test(epoch):
    """Uses test data to evaluate the likelihood of the model."""

    mean_kld_loss, mean_nll_loss = 0, 0
    for i, (data, _) in enumerate(test_loader):

        #data = Variable(data)
        data = Variable(data.squeeze().transpose(0, 1))
        #data = (data - data.min().item()) / (data.max().item() - data.min().item())

        kld_loss, nll_loss, _, _ = model(data)
        mean_kld_loss += kld_loss.item()
        mean_nll_loss += nll_loss.item()

    mean_kld_loss /= len(test_loader.dataset)
    mean_nll_loss /= len(test_loader.dataset)

    print('====> Test set loss: KLD Loss = {:.4f}, NLL Loss = {:.4f} '.format(
        mean_kld_loss, mean_nll_loss))
Example #16
def train(epoch):
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):

        #transforming data
        #data = Variable(data)
        #to remove eventually
        #data, _ = data.to(device, dtype=torch.float), _.to(device, dtype=torch.float)
        data = Variable(data.squeeze().transpose(0, 1))
        data = (data - data.min().data.item()) / (data.max().data.item() -
                                                  data.min().data.item())
        #data = data.to(device)

        #forward + backward + optimize
        optimizer.zero_grad()
        kld_loss, nll_loss, _, _ = model(data)
        loss = kld_loss + nll_loss
        loss.backward()
        #grad norm clipping, before the optimizer step
        nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        #printing
        if batch_idx % print_every == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\t KLD Loss: {:.6f} \t NLL Loss: {:.6f}'
                .format(epoch, batch_idx * len(data),
                        len(train_loader.dataset),
                        100. * batch_idx / len(train_loader),
                        kld_loss.data.item() / batch_size,
                        nll_loss.data.item() / batch_size))

            sample = model.sample(28)
            plt.imshow(sample.numpy())
            plt.pause(1e-6)

        train_loss += loss.data.item()

    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
Example #17
        def train(epoch):
            train_loss = 0
            tq = tqdm(train_loader)
            for batch_idx, (data, _) in enumerate(tq):
                data = Variable(data.squeeze().transpose(0, 1))
                data = (data - data.min().item()) / (data.max().item() -
                                                     data.min().item())
                #forward + backward + optimize
                optimizer.zero_grad()
                kld_loss, nll_loss, _, _ = model(data)
                loss = kld_loss + nll_loss
                loss.backward()
                #grad norm clipping, before the optimizer step
                nn.utils.clip_grad_norm_(model.parameters(), clip)
                optimizer.step()

                tq.set_postfix(kld_loss=(kld_loss.item() / batch_size),
                               nll_loss=(nll_loss.item() / batch_size))
                train_loss += loss.item()
            return
Example #18
def train(epoch):
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):

        #transforming data
        data = data.to(device)
        data = data.squeeze().transpose(0, 1)  # (seq, batch, elem)
        data = (data - data.min()) / (data.max() - data.min())

        #forward + backward + optimize
        optimizer.zero_grad()
        kld_loss, nll_loss, _, _ = model(data)
        loss = kld_loss + nll_loss
        loss.backward()
        #grad norm clipping, before the optimizer step
        nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        #printing
        if batch_idx % print_every == 0:
            print(
                'Train Epoch: {} [{}/{} ({:.0f}%)]\t KLD Loss: {:.6f} \t NLL Loss: {:.6f}'
                .format(epoch, batch_idx * batch_size,
                        batch_size * (len(train_loader.dataset) // batch_size),
                        100. * batch_idx / len(train_loader),
                        kld_loss / batch_size, nll_loss / batch_size))

            sample = model.sample(torch.tensor(28, device=device))
            plt.imshow(sample.to(torch.device('cpu')).numpy())
            plt.pause(1e-6)

        train_loss += loss.item()

    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
Example #19
        cnn = torch.load(model_file, map_location=device)
        cnn.eval()  # Change model to 'eval' mode .
        nets.append(cnn)

    with torch.set_grad_enabled(False):
        i = 0
        tick = time.time()
        for spec, hash, data in test_generator:
            combined_classes = torch.zeros(6, device=device)
            for weight, net in zip(model_weigh_list, nets):
                # Here is the trick. The datagen generates batches of 1, but the dataloader actually returns
                # batches of variable length, so we permute dims to get a proper tensor.
                # outputs = net(data.permute((1,0,2)).to(device))
                try:
                    a = spec.shape[4]
                    spec = spec.squeeze(0)
                    spec = spec.to(device).type(torch.cuda.FloatTensor)
                except:
                    spec = spec.to(device).type(torch.cuda.FloatTensor)
                outputs = net(spec)
                classes = torch.softmax(outputs, 1).mean(0)
                combined_classes += classes * weight
            winner = combined_classes.argmax().item()
            answer.append({'hash': hash[0], 'class': class_list[winner]})
            # print(winner)
            i += 1
            if i % 100 == 0:
                tock = time.time()
                time_to_go = (len(test_generator) - i) / 100 * (tock - tick)
                print('Batch {:d} / {:d}, {:.1f} sec, to go: {:.0f} s'.format(
                    i, len(test_generator), tock - tick, time_to_go))
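The try/except above only probes whether spec carries an extra leading batch dimension. A flatter way to express the same check (a sketch, assuming the datagen adds at most one extra dim and device is a CUDA device, as in the loop):

if spec.dim() >= 5:
    spec = spec.squeeze(0)
spec = spec.to(device).float()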
Example #20
def evaluate_adv_images(model, device, kwargs, mean, std, data_loader):
    batch_size = 100
    model.eval()

    adv_data_loader = torch.utils.data.DataLoader(
        torchvision.datasets.DatasetFolder(
            'adv_images',  #Change this to your adv_images folder
            loader=numpy_loader,
            extensions='.npy',
            transform=transforms.Compose(
                [transforms.ToTensor(),
                 transforms.Normalize(mean, std)])),
        batch_size=batch_size,
        **kwargs)

    evaluate_model_for_accuracy(model, device, adv_data_loader)

    given_dataset = []
    adv_images = []
    labels = []
    with torch.no_grad():
        for data, target in data_loader:
            data, target = data.to(device), target.to(device)
            if len(given_dataset) == 0:
                given_dataset = data.squeeze().detach().cpu().numpy()
            else:
                given_dataset = np.concatenate(
                    [given_dataset,
                     data.squeeze().detach().cpu().numpy()],
                    axis=0)

        for data, target in adv_data_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            label = target.squeeze().detach().cpu().numpy()
            softmax_values = torch.nn.Softmax(dim=1)(output).cpu().numpy()[
                np.arange(batch_size), label]
            adv_images = data
            labels = target

    #Checking the range of generated images
    adv_images_copy = copy.deepcopy(adv_images)
    for k in range(adv_images_copy.shape[0]):
        image_ = adv_images_copy[k, :, :]

        for t, m, s in zip(image_, mean, std):
            t.mul_(s).add_(m)

        image = image_.squeeze().detach().cpu().numpy()
        image = 255.0 * image

        if np.min(image) < 0 or np.max(image) > 255:
            print('Generated adversarial image is out of range.')
            sys.exit()

    adv_images = adv_images.squeeze().detach().cpu().numpy()
    labels = labels.squeeze().detach().cpu().numpy()

    #Checking for equation 2 and equation 3
    if all([x > 0.8 for x in softmax_values.tolist()]):
        print('Softmax values for all of your adv images are greater than 0.8')
        S = 0
        for i in range(10):
            label_indices = np.where(labels == i)[0]
            a_i = adv_images[label_indices, :, :]
            for k in range(10):
                image = a_i[k, :, :]
                S = S + np.min(
                    np.sqrt(
                        np.sum(np.square(
                            np.subtract(
                                given_dataset,
                                np.tile(np.expand_dims(image, axis=0),
                                        [1000, 1, 1]))),
                               axis=(1, 2))))

        print('Value of S : {:.4f}'.format(S / 100))

    else:
        print('Softmax values for some of your adv images are less than 0.8')
Example #21
        ]))
    train_loader = torch.utils.data.DataLoader(train,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=2)

    img, joint = train[0]
    # print("image shape is", img.shape)
    # print(joint)
    to_pil_image = transforms.ToPILImage()
    img = to_pil_image(img)
    img.show()
    for step, (data, _) in enumerate(train_loader):
        # data = Variable(data)
        # data = data.numpy()
        if step == 1:
            print(data.shape)
            img = data.squeeze()
            img = to_pil_image(img)
            img.show()

        # means = []
        # stdevs = []
        # for i in range(3):
        #     pixels = data[:, i, :, :].ravel()
        #     means.append(np.mean(pixels))
        #     stdevs.append(np.std(pixels))
        # print("means: {}".format(means))
        # print("stdevs: {}".format(stdevs))
        # print('transforms.Normalize(mean = {}, std = {})'.format(means, stdevs))
Example #22
    def data_augmentation(
        cls,
        data,
        label,
        flip=True,
        mirror=True,
        multi_scale=True,
        rotate=True,
        bright=True,
        contrast=True,
    ):
        will_flip, will_mirror, will_multi_scale, will_rotate, will_bright, will_contrast = False, False, False, False, False, False

        if flip and random.random() < 0.5:
            will_flip = True
        if mirror and random.random() < 0.5:
            will_mirror = True
        if multi_scale and random.random() < 0.5:
            will_multi_scale = True
        if rotate and random.random() < 0.5:
            will_rotate = True
        if bright and random.random() < 0.5:
            will_bright = True
        if contrast and random.random() < 0.5:
            will_contrast = True

        if will_flip:
            label = label[::-1, :]
            data = data[:, ::-1, :]

        if will_mirror:
            label = label[:, ::-1]
            data = data[:, :, ::-1]

        if will_rotate:
            ## np to PIL
            data = np.uint8(data * 255)
            data = Image.fromarray(data.transpose((1, 2, 0)))
            label = Image.fromarray(np.uint8(label))
            alpha = 360 * (random.random() - 0.5)
            data = data.rotate(alpha)
            label = label.rotate(alpha)

            data = np.asarray(data)
            data = data.transpose((2, 0, 1)) / 255
            data = data.astype(np.float32)
            # print(data.dtype)
            label = np.asarray(label, dtype='float32')

        data_p = np.copy(data)
        label_p = np.copy(label)

        if will_multi_scale:
            scale_size = random.randint(200, 300)
            data = torch.from_numpy(np.copy(data)).unsqueeze(0)
            data = F.interpolate(data,
                                 size=(scale_size, scale_size),
                                 mode='bilinear',
                                 align_corners=True)
            data = data.squeeze().numpy()

            label = torch.from_numpy(np.copy(label)).unsqueeze(0).unsqueeze(0)
            label = F.interpolate(label,
                                  size=(scale_size, scale_size),
                                  mode='nearest')
            label = label.squeeze().numpy()

            crop_size = 256
            data_p = np.zeros((3, crop_size, crop_size), dtype='float32')
            label_p = np.zeros((crop_size, crop_size), dtype='float32')
            if scale_size > crop_size:
                x1 = random.randint(0, scale_size - crop_size)
                y1 = random.randint(0, scale_size - crop_size)
                data_p[:, :, :] = data[:, x1:x1 + crop_size, y1:y1 + crop_size]
                label_p[:, :] = label[x1:x1 + crop_size, y1:y1 + crop_size]
            else:
                data_p[:, 0:scale_size, 0:scale_size] = data
                label_p[0:scale_size, 0:scale_size] = label

        if will_bright:
            delta = 0.1
            delta = random.uniform(-delta, delta)
            data_p += delta
            data_p = data_p.clip(min=0, max=1)

        if will_contrast:
            alpha = random.uniform(0.5, 1.5)
            data_p *= alpha
            data_p = data_p.clip(min=0, max=1)

        return data_p, label_p
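A minimal invocation under assumed shapes: data as a (3, H, W) float array in [0, 1] and label as an (H, W) map, which matches the indexing above (MyDataset is a stand-in for whatever class defines this classmethod):

import numpy as np

# Hypothetical call; MyDataset names the class that owns data_augmentation.
data = np.random.rand(3, 256, 256).astype('float32')
label = np.random.randint(0, 2, (256, 256)).astype('float32')
aug_data, aug_label = MyDataset.data_augmentation(data, label)
print(aug_data.shape, aug_label.shape)  # (3, 256, 256) (256, 256)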
Example #23
def run(args, kwargs):
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # args.snap_dir = snap_dir = \
    #    '/u/scottcao/scratch/idf/discrete_logisticoi_flows_8_levels_4__2020-02-23_12_29_53/'

    # ==================================================================================================================
    # SNAPSHOTS
    # ==================================================================================================================

    # ==================================================================================================================
    # LOAD DATA
    # ==================================================================================================================
    _, _, test_loader, args = load_dataset(args, **kwargs)

    final_model = torch.load(args.snap_dir + 'a.model')
    print(f"Number of params: {count_params(final_model)}")

    if hasattr(final_model, 'module'):
        final_model = final_model.module
    final_model = final_model.cuda()

    sizes = []
    errors = []
    bpds = []

    acc = timer.TimeAccumulator()

    t = 0
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            data = data.squeeze(0)

            with acc.execute():
                if args.cuda:
                    data = data.cuda()
                state, state_sizes, bpd, error = \
                    encode_images(data, final_model, decode=not args.no_decode)

            errors += [error]
            bpds.extend(bpd)
            sizes.extend(state_sizes)

            t += len(data)

            print('Examples: {}/{} bpd compression: {:.3f} error: {},'
                  ' analytical bpd {:.3f} time: {:.3f}'.format(
                      i,
                      len(test_loader.dataset),
                      np.sum(sizes) / np.prod(data.size()[1:]) / t,
                      np.sum(errors),
                      np.mean(bpds),
                      acc.mean_time_spent(),
                  ),
                  end="\r")

            # if args.no_decode:
            # print('Not testing decoding.')
            # else:
            # print('Error: {}'.format(np.sum(errors)))
    print()
    print('Final bpd: {:.3f} error: {}'.format(
        np.mean(sizes) / np.prod(data.size()[1:]), np.sum(errors)))
    print('Took {:.3f} seconds / example'.format(acc.mean_time_spent()))
Example #24
def main(model, tst_dataloader, args):
    # calculate weights #
    acc = []
    running_corrects = 0
    running_tp = 0
    running_fp = 0
    running_fn = 0
    elements_count = 0.0
    count_lines = 0.0
    sum_many_ones = 0.0
    sum_all_zeros = 0.0
    total_matrices = 0.0
    wrong_matrices_1 = 0.0
    wrong_matrices_2 = 0.0
    for data, target in tst_dataloader:
        many_ones = 0
        not_all_zeros = 0
        curr_wrong_matrices_1 = 0
        curr_wrong_matrices_2 = 0

        if args.is_cuda:
            data = data.squeeze(0).cuda()
            target = target.squeeze(0).cuda()
        else:
            data = data.squeeze(0)
            target = target.squeeze(0)

        elements_count += data.shape[0] * data.shape[1] * data.shape[2]

        model.hidden_row = model.init_hidden(data.size(0))
        model.hidden_col = model.init_hidden(data.size(0))

        tag_scores = model(data).detach()

        # discretization #
        if args.row_wise:
            print("we are here")
            predicted = torch.zeros_like(tag_scores)
            for b in range(tag_scores.size(0)):
                for h in range(tag_scores.size(1)):
                    value, indice = tag_scores[b, h].max(0)
                    if float(value) > args.threshold:
                        predicted[b, h, int(indice)] = 1.0
        else:

            predicted = torch.zeros_like(tag_scores)
            for b in range(tag_scores.size(0)):
                for w in range(tag_scores.size(2)):
                    value, indice = tag_scores[b, :, w].max(0)
                    if float(value) > args.threshold:
                        predicted[b, int(indice), w] = 1.0

        # weighted accuracy #
        num_positive = target.data.view(target.size(0),
                                        -1).sum(dim=1).unsqueeze(1)
        # print num_positive
        weight2negative = num_positive.float() / (target.size(1) *
                                                  target.size(2))
        # case all zeros
        weight2negative.masked_fill_(
            (weight2negative == 0),
            10)  # 10 is just a symbolic value representing 1.0
        # case all ones
        weight2negative.masked_fill_((weight2negative == 1), 0.0)
        weight2negative.masked_fill_(
            (weight2negative == 10),
            1.0)  # change the 10 placeholders back to their true value 1.0
        weight = torch.cat([weight2negative, 1.0 - weight2negative], dim=1)
        # print weight
        weight = weight.view(-1, 2, 1, 1).contiguous()
        if args.is_cuda:
            weight = weight.cuda()
        acc.append(eval_acc(tag_scores.data,
                            target.float().data, weight, args))

        # TP, TN, FP, FN, F1_score #
        target = target.float()
        running_corrects += torch.sum(predicted == target.data).double()
        running_tp += torch.sum(
            (predicted == target.data)[target.data == 1]).double()
        running_fp += torch.sum(
            (predicted != target.data)[predicted.data == 1]).double()
        running_fn += torch.sum(
            (predicted != target.data)[predicted.data == 0]).double()

        # constraints #
        if args.row_wise:
            print("we are here")
            for b in range(tag_scores.size(0)):
                wrong_flag_1 = False
                wrong_flag_2 = False
                for w in range(tag_scores.size(2)):
                    sum_column_predict = torch.sum(predicted[b, :, w])
                    sum_column_gt = torch.sum(target[b, :, w])
                    if sum_column_predict.float().item() > 1.0:
                        many_ones += 1.0
                        wrong_flag_1 = True

                    elif sum_column_gt.float().item(
                    ) == 0.0 and sum_column_predict.float().item() == 1.0:
                        not_all_zeros += 1.0
                        wrong_flag_2 = True
                    elif sum_column_gt.float().item(
                    ) == 1.0 and sum_column_predict.float().item() == 0.0:
                        not_all_zeros += 1.0
                        wrong_flag_2 = True
                if wrong_flag_1:
                    curr_wrong_matrices_1 += 1.0
                elif wrong_flag_2:
                    curr_wrong_matrices_2 += 1.0
            print(
                'curr wrong matrix rate:',
                (float(curr_wrong_matrices_1) + float(curr_wrong_matrices_2)) /
                target.shape[0])
            print('curr not_all_zeros matrices: ',
                  float(curr_wrong_matrices_2))
            print('curr many_ones matrices: ', float(curr_wrong_matrices_1))
            print('total wrong matrices: ',
                  float(curr_wrong_matrices_1) + float(curr_wrong_matrices_2))
            print('total matrices: ', target.shape[0])
            print('curr many_ones lines: ',
                  float(many_ones) / (target.shape[0] * target.shape[2]))
            count_lines += target.shape[0] * target.shape[2]
            total_matrices += target.shape[0]
            wrong_matrices_1 += curr_wrong_matrices_1
            wrong_matrices_2 += curr_wrong_matrices_2
            sum_all_zeros += not_all_zeros
            sum_many_ones += many_ones
            print()
        else:
            for b in range(tag_scores.size(0)):
                wrong_flag_1 = False
                wrong_flag_2 = False
                for h in range(tag_scores.size(1)):
                    sum_column_predict = torch.sum(predicted[b, h, :])
                    sum_column_gt = torch.sum(target[b, h, :])
                    if sum_column_predict.float().item() > 1.0:
                        many_ones += 1.0
                        wrong_flag_1 = True

                    elif sum_column_gt.float().item(
                    ) == 0.0 and sum_column_predict.float().item() == 1.0:
                        not_all_zeros += 1.0
                        wrong_flag_2 = True
                    elif sum_column_gt.float().item(
                    ) == 1.0 and sum_column_predict.float().item() == 0.0:
                        not_all_zeros += 1.0
                        wrong_flag_2 = True
                if wrong_flag_1:
                    curr_wrong_matrices_1 += 1.0
                elif wrong_flag_2:
                    curr_wrong_matrices_2 += 1.0
            print(
                'curr wrong matrix rate:',
                (float(curr_wrong_matrices_1) + float(curr_wrong_matrices_2)) /
                target.shape[0])
            print('curr not_all_zeros matrices: ',
                  float(curr_wrong_matrices_2))
            print('curr many_ones matrices: ', float(curr_wrong_matrices_1))
            print('total wrong matrices: ',
                  float(curr_wrong_matrices_1) + float(curr_wrong_matrices_2))
            print('total matrices: ', target.shape[0])
            print('curr many_ones lines: ',
                  float(many_ones) / (target.shape[0] * target.shape[1]))
            count_lines += target.shape[0] * target.shape[1]
            total_matrices += target.shape[0]
            wrong_matrices_1 += curr_wrong_matrices_1
            wrong_matrices_2 += curr_wrong_matrices_2
            sum_all_zeros += not_all_zeros
            sum_many_ones += many_ones
            print()

    epoch_acc = running_corrects.double() / elements_count
    tp = running_tp.double()
    fp = running_fp.double()
    fn = running_fn.double()
    tn = 1.0 * elements_count - tp - fp - fn
    p = tp / (tp + fp + 1e-9)
    r = tp / (tp + fn + 1e-9)

    J = r + (tn / (tn + fp)) - 1

    epoch_f1 = 2 * p * r / (p + r + 1e-9)

    print('fn: ', fn.item())
    print('fp: ', fp.item())
    print('tp: ', tp.item())
    print('tn: ', tn.item())
    print('total elements: ', elements_count)
    print('precision: ', p.item())
    print('recall: ', r.item())
    print('Youden value:', J.item())
    print('f1_score: ', epoch_f1.item())

    print('weighted acc: ', np.mean(np.array(acc)) * 100)

    print('constraints: ')
    print('not_all_zeros wrong lines rate: ', sum_all_zeros / count_lines)
    print('many ones wrong lines rate: ', sum_many_ones / count_lines)
    print('total wrong matrices: ', wrong_matrices_1 + wrong_matrices_2)
    print('total matrices: ', total_matrices)
    print('many ones wrong matrices', wrong_matrices_1)
    print('many ones wrong matrices rate', wrong_matrices_1 / total_matrices)
    print('not all zeros wrong matrices', wrong_matrices_2)
    print('not all zeros wrong matrices rate',
          wrong_matrices_2 / total_matrices)
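The tail of this function derives precision, recall, Youden's J, and F1 from the accumulated counts. A tiny worked check of those formulas with made-up counts:

# Worked check of the metric formulas above, with made-up counts.
tp, fp, fn, tn = 80.0, 10.0, 20.0, 890.0
p = tp / (tp + fp + 1e-9)        # precision ~= 0.889
r = tp / (tp + fn + 1e-9)        # recall    = 0.8
J = r + (tn / (tn + fp)) - 1     # Youden's J ~= 0.789
f1 = 2 * p * r / (p + r + 1e-9)  # F1 ~= 0.842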
Example #25
    def _getitem_fixed_size(self, index):
        if self.training:
            index_ratio = int(self.ratio_index[index])
        else:
            index_ratio = index

        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db, self._num_classes, self.training)
        data = torch.from_numpy(blobs['data'])

        data = data.squeeze(0).permute(2, 0, 1).contiguous()
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to random shuffle the bounding box.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            # grasp data
            num_grasps = 0
            gt_grasps_padding = torch.FloatTensor(self.max_num_grasp,
                                                  8).zero_()
            gt_grasp_inds_padding = torch.FloatTensor(
                self.max_num_grasp).zero_()

            if 'gt_grasps' in blobs:
                shuffle_inds_gr = list(range(blobs['gt_grasps'].shape[0]))
                np.random.shuffle(shuffle_inds_gr)
                shuffle_inds_gr = torch.LongTensor(shuffle_inds_gr)

                gt_grasps = torch.from_numpy(blobs['gt_grasps'])
                gt_grasps = gt_grasps[shuffle_inds_gr]

                if 'gt_grasp_inds' in blobs:
                    gt_grasp_inds = torch.from_numpy(blobs['gt_grasp_inds'])
                    gt_grasp_inds = gt_grasp_inds[shuffle_inds_gr]

                num_grasps = min(gt_grasps.size(0), self.max_num_grasp)
                gt_grasps_padding[:num_grasps, :] = gt_grasps[:num_grasps]
                if 'gt_grasp_inds' in blobs:
                    gt_grasp_inds_padding[:num_grasps] = gt_grasp_inds[:num_grasps]

            # object detection data
            # 4 coordinates (xmin, ymin, xmax, ymax) and 1 label
            num_boxes = 0
            gt_boxes_padding = torch.FloatTensor(self.max_num_box, 5).zero_()
            rel_mat = torch.FloatTensor(self.max_num_box,
                                        self.max_num_box).zero_()

            if 'gt_boxes' in blobs:
                shuffle_inds_bb = list(range(blobs['gt_boxes'].shape[0]))
                np.random.shuffle(shuffle_inds_bb)
                shuffle_inds_bb = torch.LongTensor(shuffle_inds_bb)

                gt_boxes = torch.from_numpy(blobs['gt_boxes'])
                gt_boxes = gt_boxes[shuffle_inds_bb]

                not_keep = (gt_boxes[:, 0] == gt_boxes[:, 2]) | (
                    gt_boxes[:, 1] == gt_boxes[:, 3])
                keep = torch.nonzero(not_keep == 0).view(-1)

                if keep.numel() != 0:
                    gt_boxes = gt_boxes[keep]
                    shuffle_inds_bb = shuffle_inds_bb[keep]

                    num_boxes = min(gt_boxes.size(0), self.max_num_box)
                    gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes]

                    # get relationship matrix
                    if 'nodeinds' in blobs:
                        for o1 in range(num_boxes):
                            for o2 in range(num_boxes):
                                ind_o1 = blobs['nodeinds'][
                                    shuffle_inds_bb[o1].item()]
                                ind_o2 = blobs['nodeinds'][
                                    shuffle_inds_bb[o2].item()]
                                if ind_o2 == ind_o1 or rel_mat[o1,
                                                               o2].item() != 0:
                                    continue
                                o1_children = blobs['children'][
                                    shuffle_inds_bb[o1].item()]
                                o1_fathers = blobs['fathers'][
                                    shuffle_inds_bb[o1].item()]
                                if ind_o2 in o1_children:
                                    # o1 is o2's father
                                    rel_mat[o1, o2] = cfg.VMRN.FATHER
                                elif ind_o2 in o1_fathers:
                                    # o1 is o2's child
                                    rel_mat[o1, o2] = cfg.VMRN.CHILD
                                else:
                                    # o1 and o2 has no relationship
                                    rel_mat[o1, o2] = cfg.VMRN.NOREL

            # transfer index into sequence number of boxes returned, and filter out grasps belonging to dropped boxes.
            if 'gt_grasp_inds' in blobs:
                gt_grasp_inds_padding_ori = gt_grasp_inds_padding.clone()
                order2inds = dict(enumerate(blobs['nodeinds']))
                inds2order = dict(zip(order2inds.values(), order2inds.keys()))
                shuffle2order = dict(enumerate(shuffle_inds_bb.data.numpy()))
                order2shuffle = dict(
                    zip(shuffle2order.values(), shuffle2order.keys()))

                # make box index begins with 1
                for key in order2shuffle.keys():
                    order2shuffle[key] += 1

                for ind in blobs['nodeinds']:
                    gt_grasp_inds_padding[gt_grasp_inds_padding_ori == \
                                          float(ind)] = float(order2shuffle[inds2order[ind]])

            im_info = im_info.view(4)

            # im2show = data.clone()
            # label = gt_grasps[::10].clone()
            # print(blobs['img_id'])
            # self._show_label(im2show=im2show,gt_boxes=label, filename = os.path.basename(blobs['img_path']))

            return data, im_info, gt_boxes_padding, gt_grasps_padding, num_boxes, num_grasps, rel_mat, gt_grasp_inds_padding

        else:
            im_info = im_info.view(4)
            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            gt_grasps = torch.FloatTensor([1, 1, 1, 1, 1, 1, 1, 1])
            gt_grasp_inds = torch.FloatTensor([0])
            num_boxes = 0
            num_grasps = 0
            rel_mat = torch.FloatTensor([0])

            return data, im_info, gt_boxes, gt_grasps, num_boxes, num_grasps, rel_mat, gt_grasp_inds
Example #26
input = torch.FloatTensor(opt.batchSize, opt.nc, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(size_fullbatch, nz, 1, 1).normal_(0, 1)
if nz == 2:
    journal.add_data('mu', fixed_noise)
journal.add_data('generated', netG(fixed_noise), 0)
one = torch.FloatTensor([1])
mone = one * -1

nu_fullbatch = torch.zeros(0, nc)  # start empty; real batches are concatenated below
i = 0
data_iter = iter(dataloader)
while i < size_fullbatch:
    data = next(data_iter)
    nu_fullbatch = torch.cat((nu_fullbatch, data.squeeze()), dim=0)
    i += data.size(0)

if opt.cuda:
    netD.cuda()
    netG.cuda()
    input = input.cuda()
    one, mone = one.cuda(), mone.cuda()
    noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

# setup optimizer
if opt.adam:
    optimizerD = optim.Adam(netD.parameters(),
                            lr=opt.lrD,
                            betas=(opt.beta1, 0.999))
    optimizerG = optim.Adam(netG.parameters(),