Example no. 1
    def update(self, minibatches):
        # inputs
        all_x = torch.cat([x for x, y in minibatches])
        # labels
        all_y = torch.cat([y for _, y in minibatches])
        # one-hot labels
        all_o = torch.nn.functional.one_hot(all_y, self.num_classes)
        # features
        all_f = self.featurizer(all_x)
        # predictions
        all_p = self.classifier(all_f)

        # Equation (1): compute gradients with respect to representation
        all_g = autograd.grad((all_p * all_o).sum(), all_f)[0]

        # Equation (2): compute top-gradient-percentile mask
        percentiles = np.percentile(all_g.cpu(), self.drop_f, axis=1)
        percentiles = torch.Tensor(percentiles)
        percentiles = percentiles.unsqueeze(1).repeat(1, all_g.size(1))
        mask_f = all_g.lt(percentiles.cuda()).float()

        # Equation (3): mute top-gradient-percentile activations
        all_f_muted = all_f * mask_f

        # Equation (4): compute muted predictions
        all_p_muted = self.classifier(all_f_muted)

        # Section 3.3: Batch Percentage
        all_s = F.softmax(all_p, dim=1)
        all_s_muted = F.softmax(all_p_muted, dim=1)
        changes = (all_s * all_o).sum(1) - (all_s_muted * all_o).sum(1)
        percentile = np.percentile(changes.detach().cpu(), self.drop_b)
        mask_b = changes.lt(percentile).float().view(-1, 1)
        mask = torch.logical_or(mask_f, mask_b).float()

        # Equations (3) and (4) again, this time muting over examples
        all_p_muted_again = self.classifier(all_f * mask)

        # Equation (5): update
        loss = F.cross_entropy(all_p_muted_again, all_y)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return {'loss': loss.item()}
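To make the masking logic above easier to follow in isolation, here is a minimal, self-contained sketch of the gradient-percentile feature muting (Equations (1)-(3)) with a toy linear classifier; the shapes, the drop_f value and the absence of a featurizer are illustrative assumptions, not part of the original code.

import numpy as np
import torch
import torch.nn.functional as F
from torch import autograd

torch.manual_seed(0)
batch, feat_dim, num_classes, drop_f = 8, 16, 5, 66.0

classifier = torch.nn.Linear(feat_dim, num_classes)
all_f = torch.randn(batch, feat_dim, requires_grad=True)  # stand-in for featurizer output
all_y = torch.randint(0, num_classes, (batch,))
all_o = F.one_hot(all_y, num_classes)

# Equation (1): gradient of the true-class logits w.r.t. the features
all_p = classifier(all_f)
all_g = autograd.grad((all_p * all_o).sum(), all_f)[0]

# Equation (2): per-example percentile threshold; keep features whose gradient is below it
thresh = torch.tensor(np.percentile(all_g.numpy(), drop_f, axis=1),
                      dtype=all_g.dtype).unsqueeze(1)
mask_f = all_g.lt(thresh).float()

# Equations (3)-(4): mute the high-gradient features and re-classify
all_p_muted = classifier(all_f * mask_f)
print(mask_f.mean().item())  # roughly drop_f / 100 of the entries are kept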
Example no. 2
    def __init__(self, in_features, out_features, groups, size, kernel_size=3):
        super().__init__()
        if in_features % groups != 0:
            raise ValueError("in_features must be divisible by groups")
        self.out_features = out_features
        self.weight = torch.nn.Parameter(
            torch.empty(out_features, kernel_size**2 * in_features))
        torch.nn.init.orthogonal_(self.weight)

        def flt(x: torch.Tensor, n):
            return torch.logical_or(x >= n, x < 0)

        features = in_features // groups
        with torch.no_grad():
            mask_kernel = torch.arange(-1, 2).view(-1, 1)
            pre_features = torch.cat([
                torch.tensor([2**i] * features) for i in range(groups)
            ]).view(-1, 1, 1, 1, 1)
            pre_kernel = mask_kernel.add(3 * mask_kernel.view(1, -1)).view(
                1, 3, 3, 1, 1) * pre_features
            mask_kernel = mask_kernel.view(1, 1, -1) * pre_features.view(
                -1, 1, 1)
            theight = torch.tensor([size])
            hkernel = torch.arange(0, size).view(-1, 1).view(1, 1, 1, 1, -1)
            wkernel = torch.arange(0, size * size,
                                   size).view(-1, 1).view(1, 1, 1, -1, 1)
            fkernel = torch.arange(0, size * size * in_features,
                                   size * size).view(-1, 1, 1, 1, 1)
            kernel = fkernel.add(wkernel).add(hkernel).add(pre_kernel).view(
                1, -1)
            height_mask = mask_kernel.mul(3).add(hkernel.view(1, -1, 1))
            mask = torch.logical_or(
                flt(
                    height_mask.add(mask_kernel * 2).view(in_features, -1, 1),
                    theight),
                flt(
                    mask_kernel.mul(3).add(hkernel.view(1, -1, 1)).view(
                        in_features, -1, 1).transpose(1, 2), theight))
            mask = mask.view(in_features, size, 3, size,
                             3).transpose(1, -1).transpose(-2, -1).transpose(
                                 1, 2).reshape(1, -1)
            kernel = torch.where(
                mask,
                torch.zeros(1, dtype=torch.long).expand_as(mask), kernel + 1)
        self.register_buffer('kernel', kernel)
Example no. 3
    def forward(self, x):

        # Feed in the input to the actual model
        y = self.model(x)

        # Ensuring that the first-level outputs (mostly the image-size outputs)
        # do not have nans for visualization and metric purposes
        for key in y.keys():
            if isinstance(y[key], torch.Tensor):
                # Getting the dangerous conditions
                is_nan = torch.isnan(y[key])
                is_inf = torch.isinf(y[key])
                is_nan_or_inf = torch.logical_or(is_nan, is_inf)

                # Filling with stable information.
                y[key][is_nan_or_inf] = 0

        return y
Example no. 4
def nn_distance_inbox(pc1,
                      seed,
                      pc2,
                      half_size,
                      l1smooth=False,
                      delta=1.0,
                      l1=False):
    """ This is for unsupervised vote loss calculation
    Input:
        pc1: (B,N,C) torch tensor
        seed: (B,N,C) torch tensor
        pc2: (B,M,C) torch tensor
        half_size: (B,M,3) torch tensor
        l1smooth: bool, whether to use l1smooth loss
        delta: scalar, the delta used in l1smooth loss
        l1: bool, whether to use l1 loss
    Output:
        dist1: (B,N) torch float32 tensor
        idx1: (B,N) torch int64 tensor
        dist2: (B,M) torch float32 tensor
        idx2: (B,M) torch int64 tensor
    """
    N = pc1.shape[1]
    M = pc2.shape[1]
    pc1_expand_tile = pc1.unsqueeze(2).expand(-1, -1, M, -1)
    seed_expand_tile = seed.unsqueeze(2).expand(-1, -1, M, -1)
    pc2_expand_tile = pc2.unsqueeze(1).expand(-1, N, -1, -1)
    pc_diff = pc1_expand_tile - pc2_expand_tile
    half_size_expand_tile = half_size.unsqueeze(1).expand(-1, N, -1, -1)
    lower = pc2_expand_tile - half_size_expand_tile
    higher = pc2_expand_tile + half_size_expand_tile
    in_box_mask = torch.logical_or(
        (lower > seed_expand_tile).any(dim=3),
        (higher < seed_expand_tile).any(dim=3)).int() * 1000

    if l1smooth:
        pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1)  # (B,N,M)
    elif l1:
        pc_dist = torch.sum(torch.abs(pc_diff), dim=-1)  # (B,N,M)
    else:
        pc_dist = torch.sum(pc_diff**2, dim=-1)  # (B,N,M)

    pc_dist = pc_dist + in_box_mask
    dist1, idx1 = torch.min(pc_dist, dim=2)  # (B,N)
    dist2, idx2 = torch.min(pc_dist, dim=1)  # (B,M)
    return dist1, idx1, dist2, idx2
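A small usage sketch for nn_distance_inbox, assuming the function above is in scope; the shapes (B=2, N=128 points, M=4 boxes) and random tensors are purely illustrative.

import torch

B, N, M, C = 2, 128, 4, 3
pc1 = torch.randn(B, N, C)        # votes / points to be matched
seed = torch.randn(B, N, C)       # seed points tested against the boxes
pc2 = torch.randn(B, M, C)        # box centers
half_size = torch.rand(B, M, 3)   # box half-extents

dist1, idx1, dist2, idx2 = nn_distance_inbox(pc1, seed, pc2, half_size)
print(dist1.shape, idx1.shape)    # torch.Size([2, 128]) each
print(dist2.shape, idx2.shape)    # torch.Size([2, 4]) each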
Example no. 5
def calc_phis_torch(pred_coords,
                    N_mask,
                    CA_mask,
                    C_mask=None,
                    prop=True,
                    verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (batch, N) boolean mask for N-term positions
        * CA_mask: (batch, N) boolean mask for C-alpha positions
        * C_mask: (batch, N) or None. boolean mask for C-term positions or
                    automatically calculate from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
        Note: use [0] since all prots in batch have same backbone
    """
    # detach gradients for angle calculation - mirror selection
    pred_coords_ = torch.transpose(pred_coords.detach(), -1, -2).cpu()
    # ensure dims
    N_mask = expand_dims_to(N_mask, 2 - len(N_mask.shape))
    CA_mask = expand_dims_to(CA_mask, 2 - len(CA_mask.shape))
    if C_mask is not None:
        C_mask = expand_dims_to(C_mask, 2 - len(C_mask.shape))
    else:
        C_mask = torch.logical_not(torch.logical_or(N_mask, CA_mask))
    # select points
    n_terms = pred_coords_[:, N_mask[0].squeeze()]
    c_alphas = pred_coords_[:, CA_mask[0].squeeze()]
    c_terms = pred_coords_[:, C_mask[0].squeeze()]
    # compute phis for every protein in the batch
    phis = [
        get_dihedral_torch(c_terms[i, :-1], n_terms[i, 1:], c_alphas[i, 1:],
                           c_terms[i, 1:]) for i in range(pred_coords.shape[0])
    ]

    # return percentage of lower than 0
    if prop:
        return torch.tensor([(x < 0).float().mean().item() for x in phis])
    return phis
Example no. 6
    def compute_loss(self, start_logits, end_logits, span_logits,
                     start_labels, end_labels, match_labels, start_label_mask, end_label_mask):
        batch_size, seq_len = start_logits.size()

        start_float_label_mask = start_label_mask.view(-1).float()
        end_float_label_mask = end_label_mask.view(-1).float()
        match_label_row_mask = start_label_mask.bool().unsqueeze(-1).expand(-1, -1, seq_len)
        match_label_col_mask = end_label_mask.bool().unsqueeze(-2).expand(-1, seq_len, -1)
        match_label_mask = match_label_row_mask & match_label_col_mask
        match_label_mask = torch.triu(match_label_mask, 0)  # start should be less than or equal to end

        if self.span_loss_candidates == "all":
            # naive mask
            float_match_label_mask = match_label_mask.view(batch_size, -1).float()
        else:
            # use only pred or golden start/end to compute match loss
            start_preds = start_logits > 0
            end_preds = end_logits > 0
            if self.span_loss_candidates == "gold":
                match_candidates = ((start_labels.unsqueeze(-1).expand(-1, -1, seq_len) > 0)
                                    & (end_labels.unsqueeze(-2).expand(-1, seq_len, -1) > 0))
            else:
                match_candidates = torch.logical_or(
                    (start_preds.unsqueeze(-1).expand(-1, -1, seq_len)
                     & end_preds.unsqueeze(-2).expand(-1, seq_len, -1)),
                    (start_labels.unsqueeze(-1).expand(-1, -1, seq_len)
                     & end_labels.unsqueeze(-2).expand(-1, seq_len, -1))
                )
            match_label_mask = match_label_mask & match_candidates
            float_match_label_mask = match_label_mask.view(batch_size, -1).float()
        if self.loss_type == "bce":
            start_loss = self.bce_loss(start_logits.view(-1), start_labels.view(-1).float())
            start_loss = (start_loss * start_float_label_mask).sum() / start_float_label_mask.sum()
            end_loss = self.bce_loss(end_logits.view(-1), end_labels.view(-1).float())
            end_loss = (end_loss * end_float_label_mask).sum() / end_float_label_mask.sum()
            match_loss = self.bce_loss(span_logits.view(batch_size, -1), match_labels.view(batch_size, -1).float())
            match_loss = match_loss * float_match_label_mask
            match_loss = match_loss.sum() / (float_match_label_mask.sum() + 1e-10)
        else:
            start_loss = self.dice_loss(start_logits, start_labels.float(), start_float_label_mask)
            end_loss = self.dice_loss(end_logits, end_labels.float(), end_float_label_mask)
            match_loss = self.dice_loss(span_logits, match_labels.float(), float_match_label_mask)

        return start_loss, end_loss, match_loss
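As a side note, the start-before-end span mask used above can be shown in isolation; the tiny batch and mask values below are made up for the example.

import torch

seq_len = 5
start_label_mask = torch.tensor([[1, 1, 1, 0, 0]])   # (batch, seq_len), last two positions are padding
end_label_mask = torch.tensor([[1, 1, 1, 0, 0]])

row = start_label_mask.bool().unsqueeze(-1).expand(-1, -1, seq_len)
col = end_label_mask.bool().unsqueeze(-2).expand(-1, seq_len, -1)
span_mask = torch.triu(row & col, 0)  # keep spans with start <= end over unpadded tokens
print(span_mask[0].int())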
Example no. 7
    def append(self, ul_data, mask=None, batch_index=None):
        """
        Function used during the appending procedure.

        Args:

        ul_data (:obj:`torch.FloatTensor`): Unlabeled data batch.

        mask (:obj:`torch.BoolTensor`): Mask of the data objects which are going to be accepted
        from the batch. This object also helps in keeping track of the examples which are inserted.

        batch_index (:obj:`int`): Index of the batch of unlabeled data. To be used by the batch_masks
        dictionary.

        Returns:
        mask_change.sum() (:obj:`int`): Number of insertions and deletions of examples in the dataset.

        """

        mask = torch.ones(ul_data['input_ids'].size()[0]).bool() if mask is None else mask

        for k in ul_data:
            to_add = ul_data[k][mask].tolist()
            if len(to_add):
                self.to_append_dic[k] += to_add

        if batch_index is not None:

            mask_change = mask.clone()

            if batch_index not in self.batch_masks.keys():
                self.batch_masks[batch_index] = mask

            else:
                exists = torch.logical_and(self.batch_masks[batch_index], mask)
                mask_change[exists] = False

                changed_to_false = torch.logical_and(self.batch_masks[batch_index], mask == False)
                self.batch_masks[batch_index][changed_to_false] = False
                mask_change[changed_to_false] = True

                self.batch_masks[batch_index] = torch.logical_or(self.batch_masks[batch_index], mask)

            return mask_change.sum()
Example no. 8
    def render(self, world, use_alpha=True):
        total_colors = torch.zeros((self.image_width * self.image_height, 3),
                                   device=self.device)
        total_alpha = torch.zeros((self.image_width * self.image_height, 1),
                                  device=self.device)
        for _ in range(self.antialiasing):
            x = torch.tile(
                torch.linspace(0, (self.out_shape[1] - 1) / self.out_shape[1],
                               self.out_shape[1]),
                (self.out_shape[0], )).unsqueeze(1)
            y = torch.repeat_interleave(
                torch.linspace(0, (self.out_shape[0] - 1) / self.out_shape[0],
                               self.out_shape[0]),
                self.out_shape[1]).unsqueeze(1)

            x += torch.rand(x.shape) / self.out_shape[1]
            y += torch.rand(y.shape) / self.out_shape[0]

            ray = Rays(origin=self.origin,
                       directions=self.lower_left_corner +
                       x * self.horizontal + y * self.vertical - self.origin,
                       device=self.device)

            color = torch.full(ray.pos.size(),
                               self.background_color,
                               device=self.device)
            alpha = torch.zeros((self.image_width * self.image_height, 1),
                                device=self.device)

            self.timestep_init((self.image_width * self.image_height, 1))

            for _ in tqdm(range(self.steps), disable=not self.debug):
                ray, color, alpha = self.step(ray, world, color, alpha)

            total_colors += color
            total_alpha = torch.logical_or(total_alpha, alpha)

        scale = 1 / self.antialiasing
        total_colors = torch.sqrt(scale * total_colors)
        return Image.from_flat(total_colors,
                               total_alpha,
                               self.image_width,
                               self.image_height,
                               use_alpha=use_alpha)
Example no. 9
def crop_and_resize_face(source_image,
                         instances,
                         face_detector,
                         use_rendered,
                         rendered_image,
                         target_shape=(160, 160)):
    face_mask = torch.logical_or(torch.ge(instances, 23),
                                 torch.ge(instances, 24))
    instances_masked = torch.mul(instances, face_mask)
    face_indicies = torch.nonzero(instances_masked, as_tuple=True)
    resize_diff = int((target_shape[0] - target_shape[1]) / 2)
    if torch.numel(face_indicies[0]) == 0 or torch.numel(
            face_indicies[1]) == 0:
        return 0
    else:
        xmin, xmax = [
            torch.min(face_indicies[0]).item(),
            torch.max(face_indicies[0]).item()
        ]
        ymin, ymax = [
            torch.min(face_indicies[1]).item(),
            torch.max(face_indicies[1]).item()
        ]
        cropped_face = source_image[:, xmin:xmax, ymin:ymax]
        cropped_face = cropped_face.permute(
            (1, 2, 0)).add(1).div(2).mul(255).cpu().numpy()
        try:
            box = face_detector.detect(cropped_face)[0][0]
            if use_rendered:
                cropped_rendered_face = rendered_image[:, xmin:xmax, ymin:ymax]
                cropped_rendered_face = cropped_rendered_face.permute(
                    (1, 2, 0)).add(1).div(2).mul(255).detach().cpu().numpy()
                cropped_face = extract_face(cropped_rendered_face,
                                            box,
                                            image_size=target_shape[0])
            else:
                cropped_face = extract_face(cropped_face,
                                            box,
                                            image_size=target_shape[0])
            cropped_face = cropped_face[:, resize_diff:target_shape[0] -
                                        resize_diff, :]
            return cropped_face.cuda()
        except Exception:
            return 0
Example no. 10
    def prob_loss_fn(self, prob_pred, pos_equal_one, pos_equal_one_sum,
                     neg_equal_one, neg_equal_one_sum):
        mask = torch.logical_or(pos_equal_one, neg_equal_one)

        prob_pred = prob_pred[mask]
        labels = pos_equal_one[mask]

        cls_losses = self.ce_loss(prob_pred, labels)
        cls_pos_losses = cls_losses[labels.bool()] / pos_equal_one_sum
        cls_neg_losses = cls_losses[(1 - labels).bool()] / neg_equal_one_sum

        cls_pos_loss = torch.sum(cls_pos_losses) * (1. /
                                                    self.global_batch_size)
        cls_neg_loss = torch.sum(cls_neg_losses) * (1. /
                                                    self.global_batch_size)

        cls_loss = self.alpha_bce * cls_pos_loss + self.beta_bce * cls_neg_loss

        return cls_loss, cls_pos_loss, cls_neg_loss
Example no. 11
    def batchwise_z_score_trimming(self, Y):

        # Determine the characteristics of the data
        std = torch.std(Y, dim=1)
        mean = torch.mean(Y, dim=1)

        # Calculate the z score
        diff = (Y - torch.unsqueeze(mean, dim=1))
        z_score = diff / torch.unsqueeze(std, dim=1)

        # Determine the outliers
        outliers = z_score > self.HPARAM.PRUN_ZSCORE_THRESHOLD
        logic_or_outliers = torch.logical_or(outliers[:, :, 0], outliers[:, :,
                                                                         1])

        outliers_idx = torch.unsqueeze(logic_or_outliers,
                                       dim=-1).expand(Y.shape)

        return outliers_idx
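For reference, a self-contained sketch of the same batch-wise z-score trimming, with an explicit threshold standing in for self.HPARAM.PRUN_ZSCORE_THRESHOLD; the threshold value 2.0 and the shapes are illustrative assumptions.

import torch

def zscore_outlier_mask(Y: torch.Tensor, threshold: float = 2.0) -> torch.Tensor:
    """Y: (B, N, 2) tensor; returns a boolean mask of Y's shape marking outliers."""
    std = torch.std(Y, dim=1, keepdim=True)
    mean = torch.mean(Y, dim=1, keepdim=True)
    z_score = (Y - mean) / std
    outliers = z_score > threshold
    # a point is an outlier if either of its two coordinates is an outlier
    either = torch.logical_or(outliers[:, :, 0], outliers[:, :, 1])
    return either.unsqueeze(-1).expand(Y.shape)

mask = zscore_outlier_mask(torch.randn(4, 100, 2))
print(mask.shape, mask.sum().item())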
Example no. 12
def run_epoch(data_iter, model, lr_scheduler, optimizer, criterion, device):
    for i, batch in tqdm(enumerate(data_iter)):
        inp = batch[0]['input_ids'].to(device)
        attn = batch[0]['attention_mask'].to(device)
        target = batch[1].to(device)
        
        mask = torch.logical_or(target==0, target==101)
        logits = model(input_ids=inp, attention_mask=attn).logits
        loss_t = criterion(logits.transpose(1,2), target)
        loss = torch.mean(loss_t * (~mask))
        if i % 50 == 0:
            wandb.log({'train_loss':loss})
        if i % 500 == 0:
            data = decode_text(data_iter.dataset.tokenizer, inp, target, logits)
            wandb.log({"examples": wandb.Table(dataframe=data)})
        optimizer.zero_grad()
        lr_scheduler.step(optimizer)
        loss.backward()
        optimizer.step()
Example no. 13
def flipUpBestNewAttributes(model, model0, malicious_nodes: torch.Tensor, num_attributes_left: torch.Tensor)\
        -> torch.Tensor:
    """
        flips, for each malicious node, the unused attributes with the largest positive change (row - row0), limited by the remaining attribute budget

        Parameters
        ----------
        model: Model - post-attack model
        model0: Model - pre-attack model
        malicious_nodes: torch.Tensor - the attacker/malicious nodes
        num_attributes_left: torch.Tensor -  a torch tensor vector with ones where the attribute is not flipped

        Returns
        -------
        num_attributes_left: torch.Tensor -  a torch tensor vector with ones where the attribute is not flipped
    """
    for malicious_idx, malicious_node in enumerate(malicious_nodes):
        row = model.node_attribute_list[malicious_node][0]
        row0 = model0.node_attribute_list[malicious_node][0]

        # exclude attributes which are already used and attributes with negative gradient
        zero_mask = torch.logical_or(row < row0, row0 == 1)
        diff = row - row0
        diff[zero_mask] = 0

        # find best gradient indexes
        row = row0.clone().detach()
        max_diff = diff.max()
        flip_indexes = (diff == max_diff).nonzero(as_tuple=True)[0]

        # check if attribute limit exceeds
        if num_attributes_left[malicious_idx] < flip_indexes.shape[0]:
            flip_indexes = flip_indexes[:num_attributes_left[malicious_idx]]

        # flip
        if max_diff != 0:
            row[flip_indexes] = 1
            num_attributes_left[malicious_idx] -= flip_indexes.shape[0]

        # save flipped gradients
        model.setNodesAttributes(idx_node=malicious_node, values=row)
    return num_attributes_left
Example no. 14
def meta_test_one(cfg, backbone_net, criterion, feature_shape, device, support_img_tensor_bchw, support_mask_tensor_bhw,
                            query_img_tensor_bchw, query_mask_tensor_bhw):
    post_processor = classifier.dispatcher(cfg, feature_shape)
    post_processor = post_processor.to(device)

    num_shots = support_img_tensor_bchw.shape[0]

    # Sanity check to make sure that there are at least some foreground pixels
    for b in range(support_mask_tensor_bhw.shape[0]):
        assert 1 in support_mask_tensor_bhw[b]
    assert 1 in query_mask_tensor_bhw

    # Support set 1. Use masked average pooling to initialize class weight vector to bootstrap fine-tuning
    with torch.no_grad():
        support_feature = backbone_net(support_img_tensor_bchw)
        fg_vec = masked_average_pooling(support_mask_tensor_bhw, support_feature)  # 1 x C
        bg_vec = masked_average_pooling(support_mask_tensor_bhw == 0, support_feature)  # 1 x C
        fg_vec = fg_vec.reshape((1, -1, 1, 1)) # 1xCx1x1
        bg_vec = bg_vec.reshape((1, -1, 1, 1)) # 1xCx1x1
        bg_fg_class_mat = torch.cat([bg_vec, fg_vec], dim=0) #2xCx1x1
        post_processor.pixel_classifier.class_mat.weight.data = bg_fg_class_mat

    # Support set 2. TODO(roger): add back optimization to fine-tune initialized vectors

    # Query set. Evaluation
    with torch.no_grad():
        query_feature = backbone_net(query_img_tensor_bchw)
        eval_ori_spatial_res = query_img_tensor_bchw.shape[-2:]
        eval_predicted_mask = post_processor(query_feature, eval_ori_spatial_res)
        pred_map = eval_predicted_mask.max(dim = 1)[1]

        # TODO(roger): relax this to support multi-way
        # Following PANet, we use ignore_mask to mask confusing pixels from metric
        predicted_fg = torch.logical_and(pred_map == 1, query_mask_tensor_bhw != -1)
        predicted_bg = torch.logical_or(pred_map == 0, query_mask_tensor_bhw == -1)
        tp_cnt = torch.logical_and(predicted_fg, query_mask_tensor_bhw == 1).sum()
        fp_cnt = torch.logical_and(predicted_fg, query_mask_tensor_bhw != 1).sum()
        fn_cnt = torch.logical_and(predicted_bg, query_mask_tensor_bhw == 1).sum()
        tn_cnt = torch.logical_and(predicted_bg, query_mask_tensor_bhw != 1).sum()
    
    return tp_cnt, fp_cnt, fn_cnt, tn_cnt
Example no. 15
def make_graph(xyz, pair, idx, top_k=64, kmin=9):
    '''
    Input:
        - xyz: current backbone coordinates (B, L, 3, 3)
        - pair: pair features from Trunk (B, L, L, E)
        - idx: residue index from ground truth pdb
    Output:
        - G: defined graph
    '''

    B, L = xyz.shape[:2]
    device = xyz.device

    # distance map from current CA coordinates
    D = torch.cdist(xyz[:, :, 1, :], xyz[:, :, 1, :]) + torch.eye(
        L, device=device).unsqueeze(0) * 999.9  # (B, L, L)
    # seq sep
    sep = idx[:, None, :] - idx[:, :, None]
    sep = sep.abs() + torch.eye(L, device=device).unsqueeze(0) * 999.9

    # get top_k neighbors
    D_neigh, E_idx = torch.topk(D, min(top_k, L),
                                largest=False)  # shape of E_idx: (B, L, top_k)
    topk_matrix = torch.zeros((B, L, L), device=device)
    topk_matrix.scatter_(2, E_idx, 1.0)

    # put an edge if either of the two conditions is met:
    #   1) |i-j| < kmin (connect sequentially adjacent residues)
    #   2) top_k neighbors
    cond = torch.logical_or(topk_matrix > 0.0, sep < kmin)
    b, i, j = torch.where(cond)

    src = b * L + i
    tgt = b * L + j
    G = dgl.graph((src, tgt), num_nodes=B * L).to(device)
    G.edata['d'] = (
        xyz[b, j, 1, :] -
        xyz[b, i, 1, :]).detach()  # no gradient through basis function
    G.edata['w'] = pair[b, i, j]

    return G
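A usage sketch for make_graph, assuming dgl is installed and the function above is importable; a single length-20 chain with an 8-dimensional pair feature is used purely for illustration.

import torch

B, L, E = 1, 20, 8
xyz = torch.randn(B, L, 3, 3)          # N, CA, C backbone coordinates
pair = torch.randn(B, L, L, E)         # pair features from the trunk
idx = torch.arange(L).unsqueeze(0)     # residue indices

G = make_graph(xyz, pair, idx, top_k=8, kmin=3)
print(G.num_nodes(), G.num_edges())
print(G.edata['d'].shape, G.edata['w'].shape)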
Example no. 16
def iou_loss(prediction, target, threshold=0.5):
    assert prediction.shape == target.shape, "Prediction shape and target shape do not match"
    N = prediction.shape[0]
    prediction = (prediction > threshold).float()

    # flatten the two input vectors and compute

    # intersection(pred, target) = true_positives
    intersection = torch.logical_and(prediction.reshape(N, -1),
                                     target.reshape(N, -1))

    # union(pred, target) = true_positives + false_positives + false_negatives
    union = torch.logical_or(prediction.reshape(N, -1), target.reshape(N, -1))
    intersection_sample = intersection.sum(
        dim=1)  # compute the intersection for each sample
    union_sample = union.sum(dim=1)  # compute the union for each sample
    iou_sample = (intersection_sample / torch.clamp(union_sample, min=1e-15)
                  )  # compute the iou for each sample
    result = -torch.mean(iou_sample)
    result.requires_grad = True
    return result
Example no. 17
    def sample_texture(self, batch):
        uv_texture = batch[DIK.TEXTURE_PERSON]
        uv_map = batch[DIK.UV_RENDERED]

        texture_sampled = torch.nn.functional.grid_sample(
            uv_texture,
            uv_map,
            mode='bilinear',
            padding_mode='border',
            align_corners=False)

        # Grid sample yields the same value for the non-rendered region.
        # We mask that region.
        batch[DIK.MASK_UV] = torch.logical_or(
            batch[DIK.UV_RENDERED][:, :, :, 1] != -1,
            batch[DIK.UV_RENDERED][:, :, :, 0] != -1).unsqueeze(1)
        mask = batch[DIK.MASK_UV].expand_as(texture_sampled)
        texture_sampled[~mask] = self.MASK_VALUE

        batch[DIK.FACE_FEATUREIMAGE] = texture_sampled
        return batch
Example no. 18
 def count_safe(module, module_in):
     assert isinstance(module_in, tuple) and len(module_in) == 1
     module_in = module_in[0]
     mi = torch.min(module_in).item()
     ma = torch.max(module_in).item()
     if self.train:
         if mi < self.min:
             self.min = mi
         if ma > self.max:
             self.max = ma
     else:
         lower_lim, upper_lim = self.get_bounds()
         bounded_check = torch.logical_or(module_in < lower_lim,
                                          module_in > upper_lim)
         num_not_safe = torch.sum(bounded_check).item()
         num_all_samples = torch.numel(bounded_check)
         num_safe = num_all_samples - num_not_safe
         #self.count_safe[name].append(num_safe)#/num_all_samples)
         #self.count_notsafe[name].append(num_not_safe)#/num_all_samples)
         self.add_safe(name, num_safe)
         self.add_notsafe(name, num_not_safe)
Example no. 19
def mIoU(output, mask, n_classes, smooth=1e-10):
    with torch.no_grad():
        output = F.softmax(output, dim=1)
        output = torch.argmax(output, dim=1)
        output = output.reshape(-1)
        mask = mask.reshape(-1)

        iou_per_class = []
        for c in range(0, n_classes): #loop per pixel class
            true_class = output == c
            true_label = mask == c

            if true_label.long().sum().item() == 0:  # class not present in the ground truth
                iou_per_class.append(np.nan)
            else:
                intersect = torch.logical_and(true_class, true_label).sum().float().item()
                union = torch.logical_or(true_class, true_label).sum().float().item()

                iou = (intersect + smooth) / (union +smooth)
                iou_per_class.append(iou)
        return np.nanmean(iou_per_class)
Example no. 20
    def _map_source_apparel_on_target(self, source_texture, target_image,
                                      dense_pose):
        unfolded_texture = self._unfold_texture(source_texture)
        mapped_source_feature = self._map_texture(unfolded_texture, dense_pose)

        background_mask = torch.logical_not(dense_pose[:, :, :, 0] == 0)
        apparel_mask = dense_pose[:, :, :, 0] == 2
        for i in range(15, 23):
            apparel_mask = torch.logical_or(apparel_mask, dense_pose[:, :, :,
                                                                     0] == i)

        background_mask = background_mask.unsqueeze(1).repeat(
            1, source_texture.shape[1], 1, 1)
        apparel_mask = apparel_mask.unsqueeze(1).repeat(
            1, source_texture.shape[1], 1, 1)
        identity_mask = torch.logical_not(apparel_mask)

        apparel_masked = mapped_source_feature * apparel_mask * background_mask
        identity_masked = target_image * identity_mask * background_mask

        return apparel_masked + identity_masked
Example no. 21
def dataset_classes_iou(
    dataset: Dataset, model: Module, n_classes: int, device: device
) -> list:
    """Computes mean IoU for every class between the values predicted by the
    model and the ground truth.

    Parameters
    ----------
    dataset : Dataset
        Dataset to compute the mean IoU for every class.
    model : Module
        The model to evaluate.
    n_classes : int
        Number of classes in the dataset.
    device : device
        The device which is used for computation.

    Returns
    -------
    list
        Mean IoU for every class.
    """
    model.eval()
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)

    classes_iou = {i: [] for i in range(n_classes)}
    for data, label in dataloader:
        data = data.to(device)
        label = label.to(device)

        prediction = model(data)

        prediction = prediction.squeeze().argmax(0)
        label = label.squeeze().argmax(0)

        for i in range(n_classes):
            classes_iou[i].append(
                logical_and(prediction == i, label == i).sum()
                / logical_or(prediction == i, label == i).sum()
            )

    return [mean(classes_iou[i]) for i in range(n_classes)]
Example no. 22
    def dataset_iou(self, dataset: SegmentationEvaluationDataset,
                    device: device, n_classes: int) -> list:
        """Computes IoU of all of the classes in the dataset.

        Parameters
        ----------
        dataset : SegmentationEvaluationDataset
            Dataset to evaluate.
        device : device
            Device used for computation.
        n_classes : int
            Number of classes.

        Returns
        -------
        list
            The mean IoUs of the classes.
        """
        self.model.to(device)
        self.model.eval()

        classes_iou = {i: [] for i in range(n_classes)}

        for img, _, _, label, _ in dataset:
            img = img.to(device)
            label = label.to(device)

            prediction = self.model(img.unsqueeze(0))

            prediction = prediction.squeeze().argmax(0)
            label = label.argmax(0)

            for i in range(n_classes):
                classes_iou[i].append(
                    logical_and(prediction == i, label
                                == i).sum().detach().item() /
                    logical_or(prediction == i, label
                               == i).sum().detach().item())

        return [mean(classes_iou[i]) for i in range(n_classes)]
Example no. 23
    def predict(self, imgs, decode_lengths):

        enc_output = self.encoder(imgs)

        batch_size = enc_output.size(0)
        encoder_dim = enc_output.size(-1)

        # embed start token
        input_tokens = torch.ones(batch_size).type_as(
            enc_output).long() * self.tokenizer.stoi["<sos>"]
        input_tokens = input_tokens.unsqueeze(1)

        predictions = []

        end_condition = torch.zeros(batch_size,
                                    dtype=torch.long).type_as(enc_output)

        memory = None

        # predict sequence
        for t in range(decode_lengths):

            x, memory = self.decoder(input_tokens, enc_output, memory=memory)

            preds = x.squeeze(1)

            output_token = torch.argmax(preds, -1)

            predictions.append(output_token)

            end_condition = torch.logical_or(
                end_condition, (output_token == self.tokenizer.stoi["<eos>"]))
            if end_condition.sum() == batch_size:
                break
            output_token = output_token.unsqueeze(1)
            input_tokens = torch.cat([input_tokens, output_token], dim=1)

        predictions = torch.stack(predictions, dim=-1)

        return predictions
Example no. 24
def test_WeightDropout():
    p = 0.8
    lstm = nn.LSTMCell(2, 4)
    dp_lstm = WeightDropout(lstm, weight_p=p, layer_names=['weight_hh'])

    test_inp = torch.randn(8, 2)
    test_h, test_c = torch.randn(8, 4), torch.randn(8, 4)

    assert dp_lstm.training is True
    assert dp_lstm.weight_hh_raw.requires_grad is True

    # check dropout mask is applied on target weight matrices with proper scaling
    # both nn.Dropout and F.dropout have scaling applied
    weight_before = dp_lstm.module.weight_hh.data.clone()
    h, c = dp_lstm(test_inp, (test_h, test_c), reset_mask=True)
    weight_after_reset = dp_lstm.module.weight_hh.data.clone()
    assert torch.logical_or(weight_after_reset == 0,
                            (weight_before /
                             (1 - p) - weight_after_reset).abs() < 1e-5).all()

    # check gradients are computed, dropout entries have grad = 0
    loss = h.sum()
    loss.backward()
    assert dp_lstm.weight_hh_raw.grad is not None
    assert torch.logical_and(dp_lstm.weight_hh_raw.grad == 0,
                             dp_lstm.module.weight_hh == 0).any()

    # check dropout mask is fixed when reset_mask = False
    dp_lstm.zero_grad()
    h, c = dp_lstm(test_inp, (test_h, test_c), reset_mask=False)
    weight_without_reset = dp_lstm.module.weight_hh.data.clone()
    assert (weight_without_reset == weight_after_reset).all()

    # check that in eval mode no dropout and no scaling are applied
    dp_lstm.eval()
    with torch.no_grad():
        h, c = dp_lstm(test_inp, (test_h, test_c), reset_mask=False)
        assert (dp_lstm.module.weight_hh == dp_lstm.weight_hh_raw).all()
        h, c = dp_lstm(test_inp, (test_h, test_c), reset_mask=True)
        assert (dp_lstm.module.weight_hh == dp_lstm.weight_hh_raw).all()
Example no. 25
def compute_overall_iou_gpu(pred, target, num_classes):
    shape_ious = []
    pred = pred.max(dim=2)[1]    # (batch_size, num_points)  the pred_class_idx of each point in each sample
    # pred_np = pred.cpu().data.numpy()

    # target_np = target.cpu().data.numpy()
    for shape_idx in range(pred.size(0)):   # sample_idx
        part_ious = []
        for part in range(num_classes):   # class_idx: regardless of object category, consider all part classes (all 50)
            # in target, each point has a part label regardless of which category owns it (also 50 classes)
            # only returns 1 when both belong to this class, which means correct:
            I = torch.sum(torch.logical_and(pred[shape_idx] == part, target[shape_idx] == part))
            # returns 1 when either belongs to this class:
            U = torch.sum(torch.logical_or(pred[shape_idx] == part, target[shape_idx] == part))

            F = torch.sum(target[shape_idx] == part)

            if F != 0:
                iou = I / float(U)    #  iou across all points for this class
                part_ious.append(iou)   #  append the iou of this class
        shape_ious.append(sum(part_ious)/len(part_ious))   # each time append an average iou across all classes of this sample (sample_level!)
    return shape_ious   # [batch_size]
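A quick call of compute_overall_iou_gpu, assuming the function above is in scope; the shapes (2 samples, 1024 points, 50 part classes) and random labels are illustrative.

import torch

pred = torch.randn(2, 1024, 50)            # per-point class scores (batch, points, classes)
target = torch.randint(0, 50, (2, 1024))   # per-point ground-truth part labels
ious = compute_overall_iou_gpu(pred, target, num_classes=50)
print(len(ious), ious[0])                  # one average IoU per sample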
Example no. 26
    def _greedy_decoding(self, hidden):
        batch_size = hidden.shape[0]

        prompt_start = torch.LongTensor(batch_size).fill_(CLS_IDX).to(
            self.device)
        input_sequence = prompt_start.unsqueeze(1)
        batch_running = torch.arange(batch_size).to(self.device)
        sequence_length = torch.zeros(batch_size,
                                      dtype=torch.long).to(self.device)

        generation = torch.zeros(batch_size,
                                 self.hparams.max_sent_len).to(self.device)
        t = 0
        while t < self.hparams.max_sent_len:

            logits = self.decoder_forward(hidden, input=input_sequence)
            logits = logits[:, -1, :]
            score, sample = torch.topk(logits, 1, dim=-1)

            input_sequence = torch.cat([input_sequence, sample], dim=-1)
            sample = sample.squeeze_(-1)
            is_end = torch.logical_or(sample == PAD_IDX, sample == SEP_IDX)
            sequence_length[batch_running] = torch.where(
                is_end,
                torch.LongTensor(batch_running.shape[0]).fill_(t + 1).to(
                    self.device), sequence_length[batch_running])

            if torch.any(is_end):
                batch_running = batch_running[~is_end]
                input_sequence = input_sequence[batch_running]
                generation[batch_running] = torch.where(
                    is_end, input_sequence, generation[batch_running])

                if not batch_running.shape[0]:
                    break

            t += 1

        return generation
Example no. 27
def batchwise_get_2d_iou(batch_masks1, batch_masks2):

    # Number of masks
    n_of_m1, h, w = batch_masks1.shape
    n_of_m2 = batch_masks2.shape[0]

    # Expand the masks to match size
    expanded_b_masks1 = torch.unsqueeze(batch_masks1, dim=1).expand(
        (n_of_m1, n_of_m2, h, w))
    expanded_b_masks2 = batch_masks2.expand((n_of_m1, n_of_m2, h, w))

    # Calculating the intersection and union
    intersection = torch.sum(torch.logical_and(expanded_b_masks1,
                                               expanded_b_masks2),
                             dim=(2, 3))
    union = torch.sum(torch.logical_or(expanded_b_masks1, expanded_b_masks2),
                      dim=(2, 3))

    # Calculating iou
    iou = intersection / union

    return iou
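A short usage sketch for batchwise_get_2d_iou, assuming the function above is in scope; the random 64x64 boolean masks are illustrative only.

import torch

masks_a = torch.rand(5, 64, 64) > 0.5
masks_b = torch.rand(3, 64, 64) > 0.5
iou = batchwise_get_2d_iou(masks_a, masks_b)
print(iou.shape)   # torch.Size([5, 3]): IoU of every mask in the first set against every mask in the second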
Example no. 28
def train():
	alpha = 0.7
	model.train()
	#negative sampling
	neg_row, neg_col = negative_sampling(data.train_pos_edge_index, 
																	num_nodes = data.num_nodes,  
																	num_neg_samples= data.train_pos_edge_index.size(1))
	to_keep = ~ torch.logical_and(neg_row >= data.x_paper.size(0) , neg_col >= data.x_paper.size(0))  # exclude mesh-mesh edges
	neg_row, neg_col = neg_row[to_keep], neg_col[to_keep]
	train_neg_edge_index = torch.stack([neg_row, neg_col], dim=0)
	train_neg_edge_type = torch.logical_or(torch.logical_and(neg_row < data.x_paper.size(0) , neg_col >= data.x_paper.size(0)), torch.logical_and(neg_row >= data.x_paper.size(0) , neg_col < data.x_paper.size(0))).to(torch.float32)
	sort_indices = torch.argsort(train_neg_edge_type)
	train_neg_edge_index = train_neg_edge_index[:, sort_indices]
	train_neg_edge_type = train_neg_edge_type[sort_indices]

	optimizer.zero_grad()

	z = model.encode()
	link_logits = model.decode(z, data.train_pos_edge_index, data.train_pos_edge_type, train_neg_edge_index, train_neg_edge_type)
	link_labels = get_link_labels(data.train_pos_edge_index, train_neg_edge_index)

	link_logits_paper_paper = model.decode(z, data.train_pos_edge_index[:, data.train_pos_edge_type == 0], data.train_pos_edge_type[data.train_pos_edge_type == 0], train_neg_edge_index[:, train_neg_edge_type ==0], train_neg_edge_type[train_neg_edge_type ==0])
	link_logits_paper_mesh = model.decode(z,  data.train_pos_edge_index[:, data.train_pos_edge_type == 1], data.train_pos_edge_type[data.train_pos_edge_type == 1], train_neg_edge_index[:, train_neg_edge_type ==1], train_neg_edge_type[train_neg_edge_type ==1])
	
	link_labels_paper_paper = get_link_labels(data.train_pos_edge_index[:, data.train_pos_edge_type == 0], train_neg_edge_index[:, train_neg_edge_type ==0])
	link_labels_paper_mesh = get_link_labels(data.train_pos_edge_index[:, data.train_pos_edge_type == 1], train_neg_edge_index[:, train_neg_edge_type ==1])

	loss_paper_paper = F.binary_cross_entropy_with_logits(link_logits_paper_paper, link_labels_paper_paper)
	loss_paper_mesh = F.binary_cross_entropy_with_logits(link_logits_paper_mesh, link_labels_paper_mesh)

	loss = (1/2) * ((1 - alpha) * loss_paper_paper + alpha * loss_paper_mesh)

	# loss = F.binary_cross_entropy_with_logits(link_logits, link_labels)

	loss.backward()
	optimizer.step()
	link_probs = link_logits.sigmoid()
	rocauc=roc_auc_score(link_labels.detach().cpu().numpy(), link_probs.detach().cpu().numpy())
	return loss, rocauc
Example no. 29
    def build_dataset(self,
                      model,
                      loader,
                      loss_fn,
                      classes,
                      device,
                      num_classes=10):
        epistemic_x = []
        epistemic_y = []

        with torch.no_grad():
            for x, y in loader:
                u_out = torch.clamp(torch.log10(
                    loss_fn(
                        model(x.to(device)),
                        torch.nn.functional.one_hot(y, num_classes).to(
                            device, torch.float32)).sum(1)),
                                    min=-5).view(-1, 1)
                u_in = []
                if 'd' in self.features:
                    density_feature = self.density_estimator.score_samples(
                        x, device, no_preprocess=False).to(device)
                    u_in.append(density_feature)
                if 'v' in self.features:
                    variance_feature = self.variance_estimator.score_samples(
                        data=x, no_preprocess=False).to(device).view(-1, 1)
                    u_in.append(variance_feature)
                if 'b' in self.features:
                    u_in.append(
                        torch.logical_or(y == classes[0],
                                         y == classes[1]).view(
                                             -1, 1).float().to(device))
                u_in = torch.cat(u_in, dim=1)
                epistemic_x.append(u_in)
                epistemic_y.append(u_out)
            epi_x, epi_y = torch.cat(epistemic_x,
                                     dim=0), torch.cat(epistemic_y, dim=0)
        return epi_x, epi_y
Example no. 30
def generate_batch(data_batch):

    de_batch, en_batch = [], []
    src_pad_masks, tgt_pad_masks = [], []
    for (de_item, en_item) in data_batch:
        de_batch.append(
            torch.cat(
                [torch.tensor([BOS_IDX]), de_item,
                 torch.tensor([EOS_IDX])],
                dim=0))
        en_batch.append(en_item)
    de_batch = pad_sequence(de_batch, batch_first=True, padding_value=PAD_IDX)
    en_batch = pad_sequence(en_batch, batch_first=True, padding_value=PAD_IDX)
    for en_item in en_batch:
        curr_mask = en_item == PAD_IDX
        src_pad_masks.append(curr_mask)
    src_pad_masks = torch.stack(src_pad_masks)
    for de_item in de_batch:
        curr_mask = torch.logical_or(de_item == PAD_IDX,
                                     de_item == EOS_IDX)[:-1]
        tgt_pad_masks.append(curr_mask)
    tgt_pad_masks = torch.stack(tgt_pad_masks)
    return en_batch, de_batch, src_pad_masks, tgt_pad_masks
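A toy call of generate_batch, assuming the function above, its pad_sequence import and the BOS_IDX / EOS_IDX / PAD_IDX constants from the same module are in scope; the token ids are made up for the example.

import torch

data_batch = [
    (torch.tensor([5, 6, 7]), torch.tensor([11, 12])),
    (torch.tensor([8, 9]), torch.tensor([13, 14, 15, 16])),
]
en_batch, de_batch, src_pad_masks, tgt_pad_masks = generate_batch(data_batch)
print(de_batch.shape, en_batch.shape)            # padded (batch, seq_len) tensors
print(src_pad_masks.shape, tgt_pad_masks.shape)  # target masks are one step shorter than de_batch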