Example #1
def inverse_warp_unit(img, proj_coords, warp_img, index):
    num, channel, height, width = img.size()

    # Recover (n, y, x) from the flattened pixel index.
    x = index % width
    y = (index // width) % height
    n = index // (width * height)

    xx = proj_coords[n, 0, y, x]
    yy = proj_coords[n, 1, y, x]

    # Integer corners of the 2x2 neighbourhood around (xx, yy).
    x1 = int(torch.floor(xx))
    x2 = x1 + 1
    y1 = int(torch.floor(yy))
    y2 = y1 + 1

    # Bilinear interpolation weights.
    wx2 = xx - x1
    wx1 = x2 - xx
    wy2 = yy - y1
    wy1 = y2 - yy

    for cc in range(channel):
        if (x1 >= 0 and x1 <= width-1 and y1 >= 0 and y1 <= height-1):
            warp_img[n, cc, y, x] += wx1 * wy1 * img[n, cc, y1, x1]
        if (x1 >= 0 and x1 <= width-1 and y2 >= 0 and y2 <= height-1):
            warp_img[n, cc, y, x] += wx1 * wy2 * img[n, cc, y2, x1]
        if (x2 >= 0 and x2 <= width-1 and y1 >= 0 and y1 <= height-1):
            warp_img[n, cc, y, x] += wx2 * wy1 * img[n, cc, y1, x2]
        if (x2 >= 0 and x2 <= width-1 and y2 >= 0 and y2 <= height-1):
            warp_img[n, cc, y, x] += wx2 * wy2 * img[n, cc, y2, x2]
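A minimal driver sketch, not part of the original example: assuming img is an (N, C, H, W) tensor and proj_coords is (N, 2, H, W), the unit above can be applied once per flattened pixel index like this.

import torch

def inverse_warp(img, proj_coords):
    # Hypothetical wrapper: accumulate the bilinear-warped image by calling
    # the per-pixel unit above for every flattened (n, y, x) index.
    num, channel, height, width = img.size()
    warp_img = torch.zeros_like(img)
    for index in range(num * height * width):
        inverse_warp_unit(img, proj_coords, warp_img, index)
    return warp_img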
Example #2
    def get_or_guess_labels(self, x, kwargs):
        """
        Get the label to use in generating an adversarial example for x.
        The kwargs are fed directly from the kwargs of the attack.

        If 'y' is in kwargs, then assume it's an untargeted attack and use
        that as the label.

        If 'y_target' is in kwargs and is not None, then assume it's a
        targeted attack and use that as the label.

        Otherwise, use the model's prediction as the label and perform an
        untargeted attack.
        """

        if 'y' in kwargs and 'y_target' in kwargs:
            raise ValueError("Cannot set both 'y' and 'y_target'")
        elif 'y' in kwargs:
            labels = kwargs['y']
        elif 'y_target' in kwargs:
            labels = kwargs['y_target']
        else:
            # TODO Make sure softmax outputs are not again
            # passed through softmax layer
            # TODO Make sure this function is implemented as expected
            logits = self.model(x if len(x.shape) == 4 else x.unsqueeze(0))
            pred_max = torch.max(logits, dim=1, keepdim=True)[0]
            # One-hot of the model's most confident class, detached so no
            # gradient flows through the guessed labels.
            labels = (logits == pred_max).float().detach()

        nb_classes = labels.size(1)
        return labels, nb_classes
Example #3
def _smooth_l1_loss(x, t, in_weight, sigma):
    # Smooth L1 (Huber) loss: quadratic for |x - t| < 1 / sigma^2, linear beyond.
    sigma2 = sigma**2
    diff = in_weight * (x - t.float())
    abs_diff = diff.abs()
    flag = (abs_diff.data < (1. / sigma2)).float()
    y = (flag * (sigma2 / 2.) * (diff**2) + (1 - flag) *
         (abs_diff - 0.5 / sigma2))
    return y.sum()
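For reference, a quick check on dummy tensors (shapes are arbitrary; with sigma=1.0 the quadratic/linear switch happens at |x - t| = 1):

import torch

x = torch.randn(4, 4)
t = torch.zeros(4, 4)
in_weight = torch.ones(4, 4)
loss = _smooth_l1_loss(x, t, in_weight, sigma=1.0)
print(loss)  # 0-d tensor: quadratic near zero error, linear for |x - t| >= 1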
Example #4
 def forward(self, inputs):
     if not self.training or self.dropout == 0:
         return inputs
     else:
         dropmask = torch.bernoulli(
             torch.full(inputs.size(), self.dropout,
                        dtype=torch.float)).bool()
         inputs = inputs.clone()
         inputs[dropmask] = self.unk_id
         return inputs
Example #5
	def __getitem__(self, index):
		img_paths = self.img_paths[index]
		label = self.labels[index]
		imgs = nib.load(img_paths)
		imgs = imgs.get_data()
		imgs = torch.from_numpy(imgs)
		imgs = imgs.float().unsqueeze(0).unsqueeze(0)
		imgs = F.interpolate(imgs, size=self.opt.s, mode='trilinear')
		imgs = imgs.squeeze(0)
		return imgs, label, img_paths
Example #6
def mmd2_rbf(X, t, p, sig):
    """ Computes the l2-RBF MMD for X given t """
    # Assumes samples lie along dim 0 of X after the squeeze and that t is a
    # per-sample binary treatment indicator.
    X = X.squeeze(0)
    t = t.view(-1)

    it = torch.where(t > 0)[0]
    ic = torch.where(t < 1)[0]

    Xc = torch.index_select(X, 0, ic)
    Xt = torch.index_select(X, 0, it)

    Kcc = torch.exp(-pdist2sq(Xc, Xc) / (sig ** 2))
    Kct = torch.exp(-pdist2sq(Xc, Xt) / (sig ** 2))
    Ktt = torch.exp(-pdist2sq(Xt, Xt) / (sig ** 2))
    m = float(Xc.shape[0])
    n = float(Xt.shape[0])

    mmd = (1.0 - p) ** 2 / (m * (m - 1.0)) * (torch.sum(Kcc) - m)
    mmd = mmd + p ** 2 / (n * (n - 1.0)) * (torch.sum(Ktt) - n)
    mmd = mmd - 2.0 * p * (1.0 - p) / (m * n) * torch.sum(Kct)
    mmd = 4.0 * mmd

    return mmd
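pdist2sq is not defined in this snippet; in CFR-style MMD code it typically returns pairwise squared Euclidean distances between rows. A minimal sketch under that assumption (the name and signature follow the call above; the body is illustrative, not the original author's):

import torch

def pdist2sq(A, B):
    # Pairwise squared Euclidean distances between the rows of A and B:
    # D[i, j] = ||A[i] - B[j]||^2, via ||a||^2 - 2*a.b + ||b||^2.
    na = torch.sum(A ** 2, dim=1, keepdim=True)   # (m, 1)
    nb = torch.sum(B ** 2, dim=1, keepdim=True)   # (n, 1)
    D = na - 2.0 * torch.mm(A, B.t()) + nb.t()    # (m, n)
    return torch.clamp(D, min=0.0)                # guard against negative round-off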
Example #7
 def __init__(self, m=None, validate_args=None):
     # sanity checks
     if m is None:
         raise ValueError("Polarization `m` must be specified.")
     if not torch.is_tensor(m):
         m = torch.tensor(m)
     if not torch.is_floating_point(m):
         m = m.float()
     self.m = m
     self._param = self.m
     # determine batch size
     if isinstance(self.m, Number):
         batch_shape = torch.Size()
     else:
         batch_shape = self.m.size()
     super().__init__(batch_shape, validate_args=validate_args)
Example #8
def iou(pred, target, n_class):
    ious = []
    for cls in range(n_class):
        pred_inds = pred == cls + 1
        target_inds = target == cls + 1
        intersection = torch.sum(pred_inds[target_inds])
        union = torch.sum(pred_inds) + torch.sum(target_inds) - intersection
        if union == 0:
            # If there is no ground truth, do not include in evaluation.
            ious.append(float('nan'))
        else:
            ious.append(intersection.float() /
                        torch.max(union.float(),
                                  torch.tensor(1.).float().cuda()))
    return ious
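A quick usage sketch with hypothetical inputs (assumes a CUDA device, since the function creates a tensor with .cuda(), and that class ids run from 1 to n_class, matching the cls + 1 offset above):

import torch

pred = torch.randint(1, 4, (2, 8, 8)).cuda()    # labels in {1, 2, 3}
target = torch.randint(1, 4, (2, 8, 8)).cuda()
print(iou(pred, target, n_class=3))             # list of 3 per-class IoU values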
Example #9
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma**2
    # print ("------------")
    # print ("in_weight: ", in_weight)
    # print ("------------")
    # print ("x: ", x)
    # print ("------------")
    # print ("t: ", t)
    # print ("------------")
    t = t.float()
    diff = in_weight * (x - t)
    abs_diff = diff.abs()
    flag = (abs_diff.data < (1. / sigma2)).float()
    y = (flag * (sigma2 / 2.) * (diff**2) + (1 - flag) *
         (abs_diff - 0.5 / sigma2))
    return y.sum()
Example #10
 def __init__(self, batch):
     # Collate the (x, y) pairs in the batch into two float tensors.
     self.X, self.y = [torch.tensor(b, dtype=torch.float)
                       for b in zip(*batch)]
Example #11
    def train(self):
        """Train StarGAN within a single dataset."""
        # Set data loader.
        # if self.dataset == 'CelebA':
        #     data_loader = self.celeba_loader
        # elif self.dataset == 'RaFD':
        #     data_loader = self.rafd_loader
        # elif self.dataset == 'adience':
        #     data_loader = self.adience_loader
        data_loader = self.classification_loader
        test_loader = self.test_loader
        self.log_name = os.path.join(self.log_dir, 'loss_log.txt')
        # self.transform(image), torch.FloatTensor(one), torch.FloatTensor(cost_one), torch.FloatTensor(y_sig01)
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write(
                '================ Training Loss (%s) ================\n' % now)

        # Fetch fixed inputs for debugging.

        # Learning rate cache for decaying.
        classification_lr = self.classification_lr

        # Start training from scratch or resume training.
        start_iters = 0
        if self.resume_iters:
            start_iters = self.resume_iters
            self.restore_model(self.resume_iters)

        # Start training.
        print('Start training...')
        start_time = time.time()
        for i in range(start_iters, self.num_iters):

            # =================================================================================== #
            #                             1. Preprocess input data                                #
            # =================================================================================== #

            # Fetch real images and labels.
            try:
                image, one, cost_one, y_sig01 = next(data_iter)
            except (NameError, StopIteration):
                data_iter = iter(data_loader)
                image, one, cost_one, y_sig01 = next(data_iter)

            # =================================================================================== #
            #                             2. Train the discriminator                              #
            # =================================================================================== #

            # Compute loss with real images.
            class_fc1, class_fc2 = self.classification_modle(image)

            Cossloss = getCossloss(class_fc1, one, cost_one)
            KLloss = getKLloss(class_fc2, y_sig01)

            outloss = Cossloss + KLloss
            self.reset_grad()
            outloss.backward()
            self.classification_optimizer.step()

            # Logging.
            loss = {}
            loss['classfication/Cossloss'] = Cossloss.item()
            loss['classfication/KLloss'] = KLloss.item()

            # =================================================================================== #
            #                               3. Train the generator                                #
            # =================================================================================== #

            # =================================================================================== #
            #                                 4. Miscellaneous                                    #
            # =================================================================================== #

            # Print out training information.
            if (i + 1) % self.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")

                log = "{}, Elapsed [{}], Iteration [{}/{}]".format(
                    log_time, et, i + 1, self.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log)
                with open(self.log_name, "a") as log_file:
                    log_file.write('%s\n' % log)  # save the message

                if self.use_tensorboard:
                    for tag, value in loss.items():
                        self.logger.scalar_summary(tag, value, i + 1)

            # # Translate fixed images for debugging.
            # if (i + 1) % self.sample_step == 0:
            #     with torch.no_grad():
            #         x_fake_list = [x_fixed]
            #         for c_fixed in c_fixed_list:
            #             x_fake_list.append(self.G(x_fixed, c_fixed))
            #         x_concat = torch.cat(x_fake_list, dim=3)
            #         sample_path = os.path.join(self.sample_dir, '{}-images.jpg'.format(i + 1))
            #         save_image(self.denorm(x_concat.data.cpu()), sample_path, nrow=1, padding=0)
            #         print('Saved real and fake images into {}...'.format(sample_path))

            # Save model checkpoints.
            if (i + 1) % self.model_save_step == 0:
                classification_path = os.path.join(
                    self.model_save_dir,
                    '{}-classification.ckpt'.format(i + 1))
                torch.save(self.classification_modle.state_dict(),
                           classification_path)
                print('Saved model checkpoints into {}...'.format(
                    self.model_save_dir))

            # Decay learning rates.
            if (i + 1) % self.lr_update_step == 0 and (i + 1) > (
                    self.num_iters - self.num_iters_decay):
                classification_lr -= (self.classification_lr /
                                      float(self.num_iters_decay))

                self.update_lr(classification_lr)
                lr_str = 'Decayed learning rates, classification_lr: {}.'.format(
                    classification_lr)
                with open(self.log_name, "a") as log_file:
                    log_file.write('%s\n' % lr_str)  # save the message

            if (i + 1) % self.sample_step == 0:
                CA3_sum = 0
                CA5_sum = 0
                for i_train_batch, train_batch in enumerate(test_loader):
                    image, one, cost_one, y_sig01 = train_batch
                    class_fc1, class_fc2 = self.classification_modle(image)
                    result_index = torch.argmax(class_fc2, dim=-1)
                    CA3 = torch.abs(result_index - y_sig01) <= 3
                    CA5 = torch.abs(result_index - y_sig01) <= 5
                    CA3_sum += torch.sum(CA3)
                    CA5_sum += torch.sum(CA5)
                CA3_precision = CA3_sum.float() / len(test_loader)
                CA5_precision = CA5_sum.float() / len(test_loader)
                print('CA3: {:.4f}, CA5: {:.4f}'.format(
                    CA3_precision.item(), CA5_precision.item()))