Example 1
import torch


def _v_trace(
    gamma,
    gae_lambda,
    value,
    rewards,
    not_done,
    old_log_pi,
    log_pi,
):
    rollout_len = rewards.size(0)
    v_trace = value[-1]
    returns = []
    ratio = (log_pi - old_log_pi).exp()
    ratio = torch.clamp(ratio, 0.05, 20.0)

    for t in reversed(range(rollout_len)):
        # Both importance weights are truncated at the same threshold here,
        # so c is just a scaled copy of rho.
        rho = torch.clamp_min(ratio[t], 1.0).unsqueeze(-1)
        c = gae_lambda * rho

        delta = rho * (rewards[t] + gamma * not_done[t] * value[t + 1] -
                       value[t])
        v_trace = value[t] + delta + gamma * not_done[t] * c * (v_trace -
                                                                value[t + 1])
        returns.append(v_trace)
    returns = torch.stack(returns[::-1])
    return returns
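The call below is only a shape check; the rollout dimensions are hypothetical, with value carrying one extra bootstrap entry at index T:

import torch

T, B = 4, 2  # hypothetical rollout length and batch size
returns = _v_trace(gamma=0.99, gae_lambda=0.95,
                   value=torch.zeros(T + 1, B, 1),
                   rewards=torch.rand(T, B, 1),
                   not_done=torch.ones(T, B, 1),
                   old_log_pi=torch.zeros(T, B),
                   log_pi=torch.zeros(T, B))
print(returns.shape)  # torch.Size([4, 2, 1])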
Example 2
import torch


def hsigmoid(x, slope=0.2, offset=0.5):
    """
    PyTorch ships a built-in hard sigmoid, but its parameters differ from
    Paddle's: the PyTorch formula is x / 6 + 0.5, while Paddle uses x / 5 + 0.5.
    :param x: input tensor
    :param slope: slope of the linear segment
    :param offset: offset added before clamping
    :return: x * slope + offset, clamped to [0, 1]
    """
    out = x * slope + offset
    torch.clamp_max_(out, 1)
    torch.clamp_min_(out, 0)
    return out
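A quick sanity check of the clamping; the input values are illustrative:

import torch

x = torch.tensor([-5.0, 0.0, 5.0])
print(hsigmoid(x))  # tensor([0.0000, 0.5000, 1.0000])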
Example 3
    def forward(self, x, t):  # pylint: disable=arguments-differ
        t_zeroone = t.clone()
        t_zeroone[t_zeroone > 0.0] = 1.0
        # x = torch.clamp(x, -20.0, 20.0)
        if self.background_clamp is not None:
            bg_clamp_mask = (t_zeroone == 0.0) & (x < self.background_clamp)
            x[bg_clamp_mask] = self.background_clamp
        bce = torch.nn.functional.binary_cross_entropy_with_logits(
            x, t_zeroone, reduction='none')
        # torch.clamp_max_(bce, 10.0)
        if self.soft_clamp is not None:
            bce = self.soft_clamp(bce)
        if self.min_bce > 0.0:
            torch.clamp_min_(bce, self.min_bce)

        if self.focal_gamma != 0.0:
            p = torch.sigmoid(x)
            pt = p * t_zeroone + (1 - p) * (1 - t_zeroone)
            # Above code is more stable than deriving pt from bce: pt = torch.exp(-bce)

            if self.focal_clamp and self.min_bce > 0.0:
                pt_threshold = math.exp(-self.min_bce)
                torch.clamp_max_(pt, pt_threshold)

            focal = 1.0 - pt
            if self.focal_gamma != 1.0:
                focal = (focal + 1e-4)**self.focal_gamma

            if self.focal_detach:
                focal = focal.detach()

            bce = focal * bce

        if self.focal_alpha == 0.5:
            bce = 0.5 * bce
        elif self.focal_alpha >= 0.0:
            alphat = self.focal_alpha * t_zeroone + (1 - self.focal_alpha) * (
                1 - t_zeroone)
            bce = alphat * bce

        weight_mask = t_zeroone != t
        bce[weight_mask] = bce[weight_mask] * t[weight_mask]

        if self.background_weight != 1.0:
            bg_weight = torch.ones_like(t, requires_grad=False)
            bg_weight[t == 0] *= self.background_weight
            bce = bce * bg_weight

        return bce
Example 4
import torch
from torch import Tensor

# EPS is a small module-level constant defined in the source module.


def dist_iou_ab(box_a: Tensor, box_b: Tensor, eps=EPS):
    """
    Args:
        box_a: tensor of shape [batch_size, boxes_a, 4]
        box_b: tensor of shape [batch_size, boxes_b, 4]
        eps: float

    Original:
    https://github.com/Zzh-tju/CIoU/blob/8995056b1e93b86d03c384f042514391b70e58e0/layers/functions/detection.py#L162
    https://github.com/Zzh-tju/CIoU/blob/8995056b1e93b86d03c384f042514391b70e58e0/layers/box_utils.py#L82
    """
    assert box_a.dim() == 3
    assert box_b.dim() == 3
    assert box_a.size(0) == box_b.size(0)

    A, B = box_a.size(1), box_b.size(1)
    box_a = box_a.unsqueeze(2).expand(-1, -1, B, -1)
    box_b = box_b.unsqueeze(1).expand(-1, A, -1, -1)

    inter_yx0 = torch.max(box_a[..., :2], box_b[..., :2])
    inter_yx1 = torch.min(box_a[..., 2:4], box_b[..., 2:4])

    inter_hw = torch.clamp_min_(inter_yx1 - inter_yx0, 0)
    inter_area = torch.prod(inter_hw, dim=-1)
    # del inter_hw, inter_yx0, inter_yx1

    hw_a = box_a[..., 2:4] - box_a[..., :2]
    hw_b = box_b[..., 2:4] - box_b[..., :2]

    area_a = torch.prod(hw_a, dim=-1)
    area_b = torch.prod(hw_b, dim=-1)

    union_area = area_a + area_b - inter_area
    iou = inter_area / (union_area + eps)
    # del inter_area, union_area, area_a, area_b, hw_a, hw_b

    center_a = (box_a[..., :2] + box_a[..., 2:4]) / 2
    center_b = (box_b[..., :2] + box_b[..., 2:4]) / 2
    inter_diag = torch.pow(center_b - center_a, 2).sum(dim=-1)

    clos_yx0 = torch.min(box_a[..., :2], box_b[..., :2])
    clos_yx1 = torch.max(box_a[..., 2:4], box_b[..., 2:4])
    clos_hw = torch.clamp_min_(clos_yx1 - clos_yx0, 0)
    clos_diag = torch.pow(clos_hw, 2).sum(dim=-1)
    # del clos_yx0, clos_yx1, clos_hw, center_a, center_b

    dist = inter_diag / (clos_diag + eps)
    return iou - dist**0.9
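A hypothetical smoke test; the eps value passed here is an assumption, since the source module defines its own EPS:

import torch

boxes_a = torch.tensor([[[0., 0., 10., 10.], [5., 5., 15., 15.]]])  # [1, 2, 4]
boxes_b = torch.tensor([[[0., 0., 10., 10.]]])                      # [1, 1, 4]
print(dist_iou_ab(boxes_a, boxes_b, eps=1e-9).shape)  # torch.Size([1, 2, 1])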
Example 5
    def datagen_func(self, S, X_os, Y_os, Z, I):

        batch_size, n_inp, w, h = S.shape
        xyzi = torch.cat([
            X_os.reshape([-1, 1, h, w]),
            Y_os.reshape([-1, 1, h, w]),
            Z.reshape([-1, 1, h, w]),
            I.reshape([-1, 1, h, w])
        ], 1)
        recs = self.genfunc(S.reshape([-1, h, w]), xyzi)
        torch.clamp_min_(recs, 0)
        x_sim = recs.reshape([batch_size, n_inp, h, w])

        return x_sim
Example 6
import torch


def average_buckets(tensor: torch.Tensor, quant_weight: torch.Tensor,
                    n_bins: int):
    bin_sums = torch.zeros(n_bins).scatter_add_(0,
                                                quant_weight.flatten().long(),
                                                tensor.flatten())
    # Clamp counts to a minimum of 1 so empty buckets do not divide by zero.
    bin_counts = torch.clamp_min_(
        torch.bincount(quant_weight.flatten(), minlength=n_bins), 1)
    lookup = bin_sums / bin_counts
    return lookup
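A minimal sketch of the bucket averaging; the values and bucket assignments are illustrative:

import torch

values = torch.tensor([1.0, 2.0, 3.0, 4.0])
buckets = torch.tensor([0, 0, 1, 3])
print(average_buckets(values, buckets, n_bins=4))
# tensor([1.5000, 3.0000, 0.0000, 4.0000]); the empty bucket stays 0 because its count is clamped to 1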
Example 7
import torch
import torch.nn.functional as F


def window_stdev(X, window_size, kernel):
    # Assumes `kernel` is a normalized averaging kernel, so that c1 below
    # estimates the windowed mean E[X] and c2 estimates E[X^2].
    X = F.pad(X, [
        window_size // 2, window_size // 2, window_size // 2, window_size // 2
    ],
              mode='reflect')
    c1 = F.conv2d(X, kernel)
    c2 = F.conv2d(torch.pow(X, 2), kernel)
    # Var = E[X^2] - E[X]^2; clamp away tiny negative values caused by
    # floating-point error before taking the square root.
    t = c2 - c1 * c1
    return torch.sqrt(torch.clamp_min_(t, 0))
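A minimal sketch assuming a normalized 3x3 averaging kernel of shape [out_channels, in_channels, kH, kW]:

import torch

X = torch.rand(1, 1, 8, 8)
kernel = torch.full((1, 1, 3, 3), 1.0 / 9)  # assumed averaging kernel
print(window_stdev(X, 3, kernel).shape)  # torch.Size([1, 1, 8, 8])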
Example 8
    def __call__(self, prediction: torch.Tensor, target: torch.Tensor,
                 mask: torch.Tensor, disc_real: torch.Tensor,
                 disc_fake: torch.Tensor):
        """
        Args:
            prediction (tensor): output of the inpainting network
            target (tensor): typically the original image
            mask (tensor): mask which covers the area to be filled in
            disc_real (tensor): output of the discriminator for the real image
            disc_fake (tensor): output of the discriminator for the fake image
        """
        # Assuming a binary mask and beta >= 1: in-mask pixels keep weight 1,
        # out-of-mask pixels get weight 1 / beta.
        mask *= self.beta
        torch.clamp_min_(mask, 1.)
        mask /= self.beta
        rec_loss = torch.sum(mask * F.l1_loss(
            prediction, target, reduction='none')) / torch.sum(mask)
        adv_real_loss = self.gan_loss(disc_real, True)
        adv_fake_loss = self.gan_loss(disc_fake, False)
        return self.alpha[0] * rec_loss + self.alpha[1] * (adv_real_loss +
                                                           adv_fake_loss) / 2
Example 9
    def _compute(self, current, target):
        """
        Updates the target parameter(s) based on the current parameter(s).

        Args:
            current (int, float, torch.tensor, np.array, torch.nn.Module): current parameter(s).
            target (int, float, torch.tensor, np.array, torch.nn.Module): target parameter(s) to be modified based on
            the current parameter(s).

        Returns:
            int, float, torch.tensor, np.array, torch.nn.Module: updated target parameter(s).
        """
        if isinstance(target, torch.nn.Module):
            if isinstance(current, torch.nn.Module):
                for p1, p2 in zip(target.parameters(), current.parameters()):
                    data = p2.data + self.speed * p2.data * self.dt
                    if self.speed < 0:
                        torch.clamp_min_(data, self.end)
                    else:
                        torch.clamp_max_(data, self.end)
                    p1.data.copy_(data)
        elif isinstance(target, torch.Tensor):
            data = current.data + self.speed * current.data * self.dt
            if self.speed < 0:
                torch.clamp_min_(data, self.end)
            else:
                torch.clamp_max_(data, self.end)
            target.data.copy_(data)
        elif isinstance(target, np.ndarray):
            target[:] = current + self.speed * current * self.dt
            # Clamp element-wise toward self.end, mirroring the tensor
            # branches above.
            if self.speed < 0:
                np.clip(target, self.end, None, out=target)
            else:
                np.clip(target, None, self.end, out=target)
        else:
            target = current + self.speed * current * self.dt
            if (self.speed < 0
                    and target < self.end) or (self.speed > 0
                                               and target > self.end):
                target = self.end
        return target
Example 10
    def __init__(self, config: Namespace) -> None:
        self.images = ImageFolder(config.image_folder)
        self.n_images = len(self.images)

        labels = pd.read_csv(config.attr_csv)

        # Train only on a small selection of domain labels
        labels = labels[config.choice_labels][:self.n_images]

        # Binary cross entropy loss with logits expects floats
        self.labels = torch.FloatTensor(labels.values)

        # Change -1 values to 0
        torch.clamp_min_(self.labels, 0)

        # The boundary between the training set and test set
        self.train_test_split = config.train_size

        # Preprocessing transformations applied to images
        self.transforms = {
            # Crop image to a square, as conv nets will perform better
            # Make the image smaller so training is faster
            # Normalize pixels to the range [-1, 1] for the model
            'augmented':
            T.Compose([
                # Random transformations can create new examples
                # and increase the size of the dataset
                T.RandomResizedCrop(config.image_end[2]),
                T.RandomHorizontalFlip(),
                T.ToTensor(),
                T.Normalize(config.image_mean, config.image_std)
            ]),
            'standard':
            T.Compose([
                T.CenterCrop(config.image_start[1]),
                T.Resize(config.image_end[2]),
                T.ToTensor(),
                T.Normalize(config.image_mean, config.image_std)
            ])
        }
Example 11
from typing import Tuple

import torch


# quantile_qq_approximation is a helper defined elsewhere in the source module.
def quantile_encode_approx(tensor: torch.Tensor,
                           n_bits: int) -> Tuple[torch.Tensor, torch.Tensor]:
    n_bins = 2**n_bits
    borders = torch.as_tensor(
        quantile_qq_approximation(tensor.numpy(), n_bins + 1)[1:-1])
    quant_weight = torch.clamp_(torch.bucketize(tensor, borders), 0,
                                n_bins - 1)
    bin_sums = torch.zeros(n_bins).scatter_add_(0, quant_weight.flatten(),
                                                tensor.flatten())
    # Clamp counts to a minimum of 1 so empty bins do not divide by zero.
    bin_counts = torch.clamp_min_(
        torch.bincount(quant_weight.flatten(), minlength=n_bins), 1)
    lookup = bin_sums / bin_counts
    return quant_weight, lookup
Example 12
import torch


def batch_span_jaccard(candidate_spans: torch.Tensor,
                       golden_span: torch.Tensor):
    # candidate_spans: [batch, n_candidates, 2]; golden_span: [batch, 2].
    # Spans are (start, end) pairs with inclusive endpoints.
    diff_tensor = candidate_spans - golden_span.unsqueeze(1)
    diff_tensor[:, :, 0] = -diff_tensor[:, :, 0]
    # Keep only the negative parts; added to the golden width below, their sum
    # gives the (possibly negative) intersection width.
    diff_tensor.clamp_max_(0)
    diff_sum = diff_tensor.sum(-1)
    golden_span_width = (golden_span[:, 1] - golden_span[:, 0]) + 1
    candidate_span_width = (candidate_spans[:, :, 1] -
                            candidate_spans[:, :, 0]) + 1
    dividend = torch.clamp_min_(diff_sum + golden_span_width.unsqueeze(1), 0)
    divisor = (candidate_span_width -
               dividend) + golden_span_width.unsqueeze(1)
    span_jaccard = torch.true_divide(dividend, divisor)
    return span_jaccard
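A minimal sketch with one batch element; the spans are illustrative:

import torch

candidates = torch.tensor([[[2, 5], [4, 9]]])  # [1, 2, 2]
golden = torch.tensor([[2, 5]])                # [1, 2]
print(batch_span_jaccard(candidates, golden))  # tensor([[1.0000, 0.2500]])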
Example 13
import torch


def iou(src, tgts):
    # Boxes are (x0, y0, x1, y1) with inclusive integer corners, hence the
    # "+ 1" when computing widths and heights.
    # Intersection
    i_x0 = torch.max(src[0], tgts[:, 0])
    i_y0 = torch.max(src[1], tgts[:, 1])
    i_x1 = torch.min(src[2], tgts[:, 2])
    i_y1 = torch.min(src[3], tgts[:, 3])

    i_w = torch.clamp_min(i_x1 - i_x0 + 1, 0)
    i_h = torch.clamp_min(i_y1 - i_y0 + 1, 0)

    intersection_area = i_w * i_h

    # Union
    src_wh = src[2:4] - src[:2] + 1
    torch.clamp_min_(src_wh, 0)
    src_area = torch.prod(src_wh, 0)

    tgts_wh = tgts[:, 2:4] - tgts[:, :2] + 1
    torch.clamp_min_(tgts_wh, 0)
    tgts_areas = torch.prod(tgts_wh, 1)

    # Union is the total area minus the double-counted intersection.
    areas_sum = src_area + tgts_areas

    return intersection_area / (areas_sum - intersection_area)
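A minimal sketch using the inclusive-corner convention noted above:

import torch

src = torch.tensor([0., 0., 9., 9.])
tgts = torch.tensor([[0., 0., 9., 9.], [5., 5., 14., 14.]])
print(iou(src, tgts))  # tensor([1.0000, 0.1429])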
Example 14
    def compute_act_stabilizing_loss_abstract(self,
                                              inputs: torch.Tensor,
                                              eps: float,
                                              inputs_min: float = 0,
                                              inputs_max: float = 1):
        """compute an extra loss for stabilizing the activations using abstract
            interpretation

        :return: loss value
        """
        loss = torch.tensor(0, dtype=torch.float32, device=inputs.device)
        with torch.no_grad():
            imin = torch.clamp_min_(inputs - eps, inputs_min)
            imax = torch.clamp_max_(inputs + eps, inputs_max)
        return self.forward(AbstractTensor(imin, imax, loss)).loss
Example 15
    def forward_with_multi_sample(self,
                                  x: torch.Tensor,
                                  x_adv: torch.Tensor,
                                  eps: float,
                                  inputs_min: float = 0,
                                  inputs_max: float = 1):
        """forward with randomly sampled perturbations and compute a
        stabilization loss """
        data = [x_adv, None, None]
        eps = float(eps)
        with torch.no_grad():
            # delta is -eps or +eps element-wise
            delta = torch.empty_like(x).random_(0, 2).mul_(2 * eps).sub_(eps)
            data[1] = torch.clamp_min_(x - delta, inputs_min)
            data[2] = torch.clamp_max_(x + delta, inputs_max)
            data = torch.cat([i[np.newaxis] for i in data], dim=0)
        y = self.forward(MultiSampleTensor.from_squeeze(data))
        return y.as_expanded_tensor()[0], y.loss
Example 16
import torch


def normalize_emb(x):
    # return  x/float(length)
    # Vectors with L2 norm above 1 are rescaled to unit norm; shorter vectors
    # pass through unchanged.
    veclen = torch.clamp_min_(torch.norm(x, 2, -1, keepdim=True), 1.0)
    ret = x / veclen
    return ret.detach()
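A minimal sketch: only the vector with norm greater than 1 is rescaled.

import torch

x = torch.tensor([[3.0, 4.0], [0.3, 0.4]])
print(normalize_emb(x))  # tensor([[0.6000, 0.8000], [0.3000, 0.4000]])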
Example 17
    def forward_train(self, box, score, feat):
        if self.feat_detach:
            feat = feat.detach()
        box = box.detach()
        score = score.detach()
        # boxes:    (B, 4, N_BOX)
        # score:    (B, 1, N_BOX)
        # feature:  (B, C, N_BOX)

        if self.train_threshold is not None:
            keep_args = torch.nonzero(
                torch.mean(score, dim=0)[0] > self.train_threshold).reshape(-1)
            if len(keep_args) > 0:
                box = box[:, :, keep_args]
                score = score[:, :, keep_args]
                feat = feat[:, :, keep_args]

        box = box.transpose(1, 2)
        score = score.transpose(1, 2)
        # boxes:    (B, N_BOX, 4)
        # score:    (B, N_BOX, 1)

        B, N_BOX, _ = box.shape
        if (self.train_max_box is not None) and (N_BOX > self.train_max_box):
            score, keep_args = torch.topk(score,
                                          k=self.train_max_box,
                                          dim=1,
                                          sorted=False)
            box = torch.stack([box[b, keep_args[b, :, 0]] for b in range(B)],
                              dim=0)
            feat = torch.stack(
                [feat[b, :, keep_args[b, :, 0]] for b in range(B)], dim=0)
        # boxes:    (B, MAX_BOX, 4)
        # score:    (B, MAX_BOX, 1)

        box_bins = torch.split(box, self.train_bin_size, dim=1)
        masks = list()
        for box_bin in box_bins:
            iou_pair = torch.stack(
                [ops.box_iou(box[b], box_bin[b]) for b in range(B)], dim=0)
            masks_bin = [
                self.__generate_mask(iou_pair, score, threshold)
                for threshold in self.iou_thresholds
            ]
            masks_bin = torch.cat(masks_bin, dim=1)
            # iou_pair:     (B, N_BOX, BIN_SIZE)
            # masks_bin:    (B, N_MASKS, BIN_SIZE)

            masks.append(masks_bin)
        masks = torch.cat(masks, dim=2)
        # masks:    (B, N_MASKS, N_BOX)

        mask_w = self.mask_w_layers(feat)
        loc_max = torch.sum(masks.detach().float() * mask_w,
                            dim=1,
                            keepdim=True)
        # mask_w:   (B, N_MASKS, N_BOX)
        # loc_max:  (B, 1, N_BOX)

        # pi = torch.softmax(self.pi_layers(feat), dim=2)
        pi = torch.exp(self.pi_layers(feat))
        # pi_score = (torch.exp(self.pi_layers(feat)) + self.epsilon) * loc_max
        # pi_score = (torch.sigmoid(self.pi_layers(feat)) * loc_max + self.epsilon)
        # pi = pi_score / torch.sum(pi_score, dim=2, keepdim=True)
        # pi = torch.softmax(self.pi_layers(feat), dim=2)
        mu = self.rescale_factor * box.transpose(1, 2).detach()
        # mu = box.transpose(1, 2).detach()

        min_gamma = self.gamma_factor * torch.clamp_min_(
            torch.cat([mu[:, 2:4] - mu[:, 0:2]] * 2,
                      dim=1), min=self.epsilon).detach()
        gamma = self.gamma_layers(feat) + min_gamma

        # print('')
        # print(mask_w.shape, masks.shape, loc_max.shape)
        # print(pi_score.shape, pi.shape)
        # print(mu.shape, gamma.shape, min_gamma.shape)

        self.mean_mask_w_list.append(torch.mean(mask_w, dim=2, keepdim=True))
        if self.forward_cnt % 500 == 0:
            mean_mask = torch.mean(torch.mean(masks.float(), dim=2), dim=0)
            mean_mask_w = torch.mean(torch.mean(mask_w, dim=2), dim=0)
            min_mask_w = torch.min(torch.min(mask_w, dim=2)[0], dim=0)[0]
            max_mask_w = torch.max(torch.max(mask_w, dim=2)[0], dim=0)[0]
            # print(mean_mask.data)
            print(mean_mask_w.data, min_mask_w.data, max_mask_w.data)
        self.forward_cnt += 1
        return pi, mu, gamma, loc_max