Example #1
def model_res2targets_acc(targeted: bool, y_targets: torch.Tensor, model_res: torch.Tensor) -> float:
    """
        Converts the class probabilities of the attacked node into an attack success/failure indicator

        Parameters
        ----------
        targeted: bool
        y_targets: torch.Tensor - the target labels of the attack
        model_res: torch.Tensor - model result for the attacked node

        Returns
        -------
        y_targets_acc: float/bool - attack success (True/False for a single target, success rate otherwise)
    """
    pred_val, pred = model_res.max(1)

    # edge case where more than one of the classes has the same prob
    max_pred = (model_res == pred_val)
    if max_pred.sum() > 1 and max_pred[range(0, y_targets.shape[0]), y_targets]:
        return True

    if y_targets.shape[0] == 1:
        y_targets_acc = (pred == y_targets)
        if not targeted:
            y_targets_acc = torch.logical_not(y_targets_acc)
    else:
        y_targets_acc = torch.sum(pred == y_targets).type(torch.FloatTensor) / y_targets.shape[0]
        if not targeted:
            y_targets_acc = 1 - y_targets_acc
    y_targets_acc = y_targets_acc.item()
    return y_targets_acc
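A minimal sketch of calling this helper for a single attacked node; the probability values below are invented for illustration:

import torch

model_res = torch.tensor([[0.1, 0.7, 0.1, 0.1]])  # (1, num_classes) probabilities for the attacked node
y_targets = torch.tensor([1])                      # attack target class
# targeted attack: the model already predicts class 1, so the attack counts as a success
print(model_res2targets_acc(targeted=True, y_targets=y_targets, model_res=model_res))   # True
# untargeted attack: the same prediction counts as a failure
print(model_res2targets_acc(targeted=False, y_targets=y_targets, model_res=model_res))  # False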
Example #2
    def forward(self, input, name=None):
        '''
        Extracts features for an input using the corresponding encoder (by name)
        '''
        # check if input is a sequence or a sequence of sequences
        if len(input.size()) == 2:
            # if it is a sequence, the output of a single transformer is used
            ignore_mask = (input == 0)
            out = self.tfs[name](self.word_embedding(input), ignore_mask)

        else:
            # if it's a sequence of sequences, the first encoder is applied
            # to each sentence, and the second one (the merger) combines the
            # sentence representations

            # reshape from BxNxTxD to BNxTxD
            input_rs = input.view(input.size(0) * input.size(1), input.size(2))
            ignore_mask = (input_rs == 0)

            # trick to avoid nan behavior with fully padded sentences
            # (due to batching)
            ignore_mask[:, 0] = 0
            out = self.tfs[name](self.word_embedding(input_rs), ignore_mask)

            # reshape back
            out = out.view(input.size(0), input.size(1), out.size(-1))

            # create mask for second transformer
            attn_mask = input > 0
            mask_list = (attn_mask.sum(dim=-1) > 0).bool()

            out = self.merger[name](out, torch.logical_not(mask_list))

        return out
Example #3
    def down_apply_node_func(self, nodes):
        if 'eta' in nodes.data:
            eta_u = nodes.data['eta']
        else:
            # root
            eta_u = nodes.data['beta']

        gamma_ch = nodes.data['gamma_ch']  # has shape (bs x L x h x h)
        gamma_p_ch = nodes.data['gamma_p_ch'].unsqueeze(
            2)  # has shape (bs x L x 1 x h)
        gamma_r = nodes.data['gamma_r'].unsqueeze(1).unsqueeze(
            2)  # has shape (bs x 1 x 1 x h)
        n_ch_mask = gamma_p_ch.exp().sum((2, 3), keepdim=True)
        a = thlp.div(gamma_ch, gamma_p_ch * n_ch_mask)
        a = thlp.mul(a, gamma_r)
        b = thlp.sum_over(a, 2, keepdim=True)
        # P(Q_l, Q_ch_l | X)
        eta_u_chl = thlp.div(thlp.mul(a,
                                      eta_u.unsqueeze(1).unsqueeze(2)),
                             b * n_ch_mask)  # has shape (bs x L x h x h)

        is_leaf = nodes.data['is_leaf']
        is_internal = th.logical_not(is_leaf)
        self.accumulate_posterior(
            self.U,
            eta_u_chl[is_internal],
            types=nodes.data['t'][is_internal] if self.num_types > 1 else None)
        self.accumulate_posterior(
            self.p,
            eta_u[is_leaf],
            types=nodes.data['t'][is_leaf] if self.num_types > 1 else None,
            pos=nodes.data['pos'][is_leaf]
            if not self.pos_stationarity else None)

        return {'eta_ch': thlp.sum_over(eta_u_chl, 3), 'eta': eta_u}
Example #4
def load_ogb_product():
    name = 'ogbn-products'
    from ogb.nodeproppred import DglNodePropPredDataset

    os.symlink('/tmp/dataset/', os.path.join(os.getcwd(), 'dataset'))

    print('load', name)
    data = DglNodePropPredDataset(name=name)
    print('finish loading', name)
    splitted_idx = data.get_idx_split()
    graph, labels = data[0]
    labels = labels[:, 0]

    graph.ndata['label'] = labels
    in_feats = graph.ndata['feat'].shape[1]
    num_labels = len(
        torch.unique(labels[torch.logical_not(torch.isnan(labels))]))

    # Find the node IDs in the training, validation, and test set.
    train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx[
        'valid'], splitted_idx['test']
    train_mask = torch.zeros((graph.number_of_nodes(), ), dtype=torch.bool)
    train_mask[train_nid] = True
    val_mask = torch.zeros((graph.number_of_nodes(), ), dtype=torch.bool)
    val_mask[val_nid] = True
    test_mask = torch.zeros((graph.number_of_nodes(), ), dtype=torch.bool)
    test_mask[test_nid] = True
    graph.ndata['train_mask'] = train_mask
    graph.ndata['val_mask'] = val_mask
    graph.ndata['test_mask'] = test_mask

    return OGBDataset(graph, num_labels)
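The class-counting idiom above (dropping NaN-labeled nodes before counting unique labels) can be checked in isolation; the label vector below is made up:

import torch

labels = torch.tensor([0., 1., float('nan'), 1., 2.])
num_labels = len(torch.unique(labels[torch.logical_not(torch.isnan(labels))]))
# num_labels == 3: NaN entries (unlabeled nodes) are excluded before counting classes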
Example #5
    def __filter_classes(self):
        print('\n')
        initial_new_class_label = len(self.class_dict)
        new_class_label = initial_new_class_label
        for c in self.classes:
            if c not in self.class_dict.keys():
                print('Class is not in the data: %s' % c)
                return
            # else:
            print('Class %s, %d' % (c, self.class_dict[c]))
            num_elems = (self.targets == self.class_dict[c]).cpu().sum()
            print('Number of elements in class %s: %d' % (c, num_elems))
            self.targets[(
                self.targets == self.class_dict[c])] = new_class_label
            new_class_label += 1

        num_elems = (self.targets < initial_new_class_label).cpu().sum()
        print('Number of elements in class unknown: %d' % (num_elems))
        self.targets[(self.targets <
                      initial_new_class_label)] = new_class_label
        if self.remove_unknowns:
            idx_to_remove = (self.targets == new_class_label)[:, -1]
            idx_to_keep = torch.logical_not(idx_to_remove)
            self.data = self.data[idx_to_keep, :]
            self.targets = self.targets[idx_to_keep, :]
        self.targets -= initial_new_class_label
        print(np.unique(self.targets.data.cpu()))
        print('\n')
Example #6
def generate_fancy_data_labels(sequence_len, batch_size):
    global data_idx
    global inds
    global masks
    global MANUAL_SEED
    temps = list()
    for i in range(batch_size):
        if inds is None or data_idx >= len(inds):
            # hack as use of RNG will fall out of sync due to pipelines being different
            torch.manual_seed(MANUAL_SEED)
            inds = torch.randperm(effective_length, device='cuda')
            masks = (torch.rand(len(inds) // batch_size + 1,
                                batch_size,
                                sequence_len,
                                device='cuda') >= MASK_PROB).long()
            MANUAL_SEED += 1
            print("new epoch", len(inds))
            data_idx = 0
            print("my start", inds[0:5])
            print("masks_checksum:", torch.sum(masks))
        if EASY_MODE:
            data_idx_ = data_idx % EASY_MODE_SIZ
        else:
            data_idx_ = data_idx
        offset = inds[data_idx_]  #* SEQUENCE_LEN
        data_idx += 1

        curr = fancy_data[offset:offset + sequence_len].clone().detach()
        temps.append(curr)
    temp = torch.stack(temps, dim=0).cuda()
    mask = masks[data_idx // batch_size]
    mask_not = torch.logical_not(mask)
    data = mask * temp + mask_not * 124
    label = temp
    return (data, label, mask_not)
Example #7
def main(args):
    print(socket.gethostname(), 'Initializing DGL dist')
    dgl.distributed.initialize(args.ip_config, net_type=args.net_type)
    if not args.standalone:
        print(socket.gethostname(), 'Initializing DGL process group')
        th.distributed.init_process_group(backend=args.backend)
    print(socket.gethostname(), 'Initializing DistGraph')
    g = dgl.distributed.DistGraph(args.graph_name,
                                  part_config=args.part_config)
    print(socket.gethostname(), 'rank:', g.rank())

    pb = g.get_partition_book()
    if 'trainer_id' in g.ndata:
        train_nid = dgl.distributed.node_split(
            g.ndata['train_mask'],
            pb,
            force_even=True,
            node_trainer_ids=g.ndata['trainer_id'])
        val_nid = dgl.distributed.node_split(
            g.ndata['val_mask'],
            pb,
            force_even=True,
            node_trainer_ids=g.ndata['trainer_id'])
        test_nid = dgl.distributed.node_split(
            g.ndata['test_mask'],
            pb,
            force_even=True,
            node_trainer_ids=g.ndata['trainer_id'])
    else:
        train_nid = dgl.distributed.node_split(g.ndata['train_mask'],
                                               pb,
                                               force_even=True)
        val_nid = dgl.distributed.node_split(g.ndata['val_mask'],
                                             pb,
                                             force_even=True)
        test_nid = dgl.distributed.node_split(g.ndata['test_mask'],
                                              pb,
                                              force_even=True)
    local_nid = pb.partid2nids(pb.partid).detach().numpy()
    print(
        'part {}, train: {} (local: {}), val: {} (local: {}), test: {} (local: {})'
        .format(g.rank(), len(train_nid),
                len(np.intersect1d(train_nid.numpy(),
                                   local_nid)), len(val_nid),
                len(np.intersect1d(val_nid.numpy(), local_nid)), len(test_nid),
                len(np.intersect1d(test_nid.numpy(), local_nid))))
    if args.num_gpus == -1:
        device = th.device('cpu')
    else:
        dev_id = g.rank() % args.num_gpus
        device = th.device('cuda:' + str(dev_id))
    labels = g.ndata['labels'][np.arange(g.number_of_nodes())]
    n_classes = len(th.unique(labels[th.logical_not(th.isnan(labels))]))
    print('#labels:', n_classes)

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, test_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends")
Example #8
    def forward(self, feats, mask):
        """求total scores of all the paths
        Arg:
          feats: tag概率分布. (seq_len, batch_size, tag_size)
          mask: 填充. (seq_len, batch_size)
        Return:
          scores: (batch_size, )
        """
        seq_len, batch_size, tag_size = feats.size()
        # initialize alpha to zero in log space
        alpha = feats.new_full((batch_size, tag_size), fill_value=-10000)
        # alpha at START_TAG is 0 in log space (probability 1)
        alpha[:, self.tag2idx[self.START_TAG]] = 0

        # iterate over timesteps, taking the emit score of the current step
        for t, feat in enumerate(feats):
            # broadcast dimension: (batch_size, next_tag, current_tag)
            # emit_score is the same regardless of current_tag, so we broadcast along current_tag
            emit_score = feat.unsqueeze(-1)  # (batch_size, tag_size, 1)
            # transition_score is the same regardless of each sample, so we broadcast along batch_size dimension
            transition_score = self.transition.unsqueeze(0)  # (1, tag_size, tag_size)
            # alpha_score is the same regardless of next_tag, so we broadcast along next_tag dimension
            alpha_score = alpha.unsqueeze(1)  # (batch_size, 1, tag_size)
            alpha_score = alpha_score + transition_score + emit_score  # (batch_size, tag_size, tag_size)
            # log_sum_exp along current_tag dimension to get next_tag alpha
            mask_t = mask[t].unsqueeze(-1)  # (batch_size, 1)
            # accumulate alpha at each step; padded positions keep the previous alpha
            alpha = log_sum_exp(alpha_score, -1) * mask_t + alpha * torch.logical_not(mask_t)  # (batch_size, tag_size)
        # arrive at END_TAG
        alpha = alpha + self.transition[self.tag2idx[self.END_TAG]].unsqueeze(0)  # (batch_size, tag_size)

        return log_sum_exp(alpha, -1)  # (batch_size, )
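The mask-gated update inside the loop keeps the previous alpha wherever a timestep is padding; a tiny standalone sketch of that gating (tensor names and values are invented):

import torch

new_alpha = torch.tensor([[5.0], [7.0]])  # candidate alpha for this timestep, (batch_size, tag_size)
old_alpha = torch.tensor([[1.0], [2.0]])
mask_t = torch.tensor([[1.0], [0.0]])     # the second sequence is already past its real length
alpha = new_alpha * mask_t + old_alpha * torch.logical_not(mask_t)
# alpha == [[5.0], [2.0]]: the padded position keeps its previous value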
Example #9
    def forward(self, **kwargs):
        # input
        whole_image = kwargs['whole_image']  # (B, 3, H, W)
        relation_features = kwargs[
            'relation_features']  # initial relation embedding (B, N, N, 6)
        text_segments = kwargs['text_segments']  # text segments (B, N, T)
        text_length = kwargs['text_length']  # (B, N)
        iob_tags_label = kwargs[
            'iob_tags_label'] if self.training else None  # (B, N, T)
        mask = kwargs['mask']  # (B, N, T)
        boxes_coordinate = kwargs['boxes_coordinate']  # (B, num_boxes, 8)

        ##### Forward Begin #####
        ### Encoder module ###
        # word embedding
        text_emb = self.word_emb(text_segments)

        # src_key_padding_mask is text padding mask, True is padding value (B*N, T)
        # graph_node_mask is mask for graph, True is valid node, (B*N, T)
        src_key_padding_mask, graph_node_mask = self.compute_mask(mask)

        # set of nodes, (B*N, T, D)
        x = self.encoder(images=whole_image,
                         boxes_coordinate=boxes_coordinate,
                         transcripts=text_emb,
                         src_key_padding_mask=src_key_padding_mask)

        ### Graph module ###
        # text_mask, True for valid, (including all not valid node), (B*N, T)
        text_mask = torch.logical_not(src_key_padding_mask).byte()
        # (B*N, T, D) -> (B*N, D)
        x_gcn = self._aggregate_avg_pooling(x, text_mask)
        # (B*N, 1),True is valid node
        graph_node_mask = graph_node_mask.any(dim=-1, keepdim=True)
        # (B*N, D), filter out not valid node
        x_gcn = x_gcn * graph_node_mask.byte()

        # initial adjacent matrix (B, N, N)
        B, N, T = mask.shape
        init_adj = torch.ones((B, N, N), device=text_emb.device)
        boxes_num = mask[:, :, 0].sum(dim=1, keepdim=True)  # (B, 1)
        # (B, N, D)
        x_gcn = x_gcn.reshape(B, N, -1)
        # (B, N, D), (B, N, N), (B,)
        x_gcn, soft_adj, gl_loss = self.graph(x_gcn, relation_features,
                                              init_adj, boxes_num)
        adj = soft_adj * init_adj

        ### Decoder module ###
        logits, new_mask, log_likelihood = self.decoder(
            x.reshape(B, N, T, -1), x_gcn, mask, text_length, iob_tags_label)
        ##### Forward End #####

        output = {"logits": logits, "new_mask": new_mask, "adj": adj}

        if self.training:
            output['gl_loss'] = gl_loss
            crf_loss = -log_likelihood
            output['crf_loss'] = crf_loss
        return output
Example #10
    def _store_episode_end_pos(self, non_first, pos, env_ids):
        """Update _indexed_pos and _headless_indexed_pos for episode end pos.

        Args:
            non_first (tensor): index of the added batch of exp, which are
                not FIRST steps.  We need to update the last step pos for all
                these env_ids[non_first].
            pos (tensor): position of the stored batch.
            env_ids (tensor): env_ids of the stored batch.
        """
        _env_ids = env_ids[non_first]
        _pos = pos[non_first]
        # Because this is a non-first step, the previous step's first step
        # is the same as this stored step's first step.  Look it up.
        prev_pos = _pos - 1
        prev_idx = self.circular(prev_pos)
        prev_first = self._indexed_pos[(_env_ids, prev_idx)]
        prev_first_idx = self.circular(prev_first)
        # Record pos of ``FIRST`` step into the current _indexed_pos
        self._indexed_pos[(_env_ids, self.circular(_pos))] = prev_first

        # Store episode end into the ``FIRST`` step of the episode.
        has_head_cond = prev_first > _pos - self._max_length
        has_head, = torch.where(has_head_cond)
        self._indexed_pos[(_env_ids[has_head],
                           prev_first_idx[has_head])] = _pos[has_head]
        # For a headless episode whose ``FIRST`` step was overwritten by new
        # data, the current step has to belong to the same episode as all the
        # other steps in the buffer, i.e. episode is longer than max_length of
        # the buffer.  This means prev_first <= _pos - max_length.
        headless, = torch.where(torch.logical_not(has_head_cond))
        self._headless_indexed_pos[_env_ids[headless]] = _pos[headless]
Example #11
    def __call__(self, sample):
        if self.degrees == 0:
            return sample

        img, vertebrae = sample['image'], sample['vertebrae']

        fill = self.fill
        if isinstance(img, torch.Tensor):
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * F._get_image_num_channels(img)
            else:
                fill = [float(f) for f in fill]
        angle = self.get_params(self.degrees)

        img = F.rotate(img, angle, self.resample, self.expand, self.center,
                       fill)
        vertebrae[:, 1:3] = self.rotate_coord(img, angle, vertebrae[:, 1:3])

        width, height = F._get_image_size(img)

        x_check = torch.logical_or(vertebrae[:, 1] < 0,
                                   vertebrae[:, 1] >= width)
        y_check = torch.logical_or(vertebrae[:, 2] < 0,
                                   vertebrae[:, 2] >= height)
        xy_check = torch.logical_or(x_check, y_check)

        return {
            'image': img,
            'vertebrae': vertebrae[torch.logical_not(xy_check)],
            'info': sample['info']
        }
Example #12
def criterion(real, pred, pad_index):
    loss_object = nn.CrossEntropyLoss(reduction='none')
    mask = torch.logical_not(real == pad_index)
    loss_ = loss_object(pred, real)
    mask = mask.type(loss_.type())
    loss_ *= mask
    return loss_.sum()
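A small usage sketch of this masked cross-entropy, assuming 5 classes and pad_index 0 (the values are purely illustrative):

import torch

real = torch.tensor([2, 4, 0, 0])          # target ids; 0 marks padding
pred = torch.randn(4, 5)                   # unnormalized logits, one row per position
loss = criterion(real, pred, pad_index=0)  # padded positions contribute nothing to the sum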
Example #13
def decide_with_cut_off(model: Experiment, dataloader: DataLoader):
    # note: only the predictions of the last batch in the dataloader are kept
    for i, data in enumerate(dataloader):
        _, raman = data
        predicted_spectrum = model(data)
        predicted_confidence = predicted_spectrum[:, :, :, 0]
        predicted_confidence_backup, _ = torch.max(predicted_confidence, dim=2)
        predict_confidence = predicted_confidence_backup.clone()
        target_confidence = raman[:, :, 0]
    cut_off_list = np.linspace(0.2, 0.8, 60)
    Ac_list = np.array([])
    Pr_list = np.array([])
    Rc_list = np.array([])
    F1_list = np.array([])
    for cut_off in cut_off_list:
        less = torch.less_equal(predict_confidence, cut_off)
        predict_confidence[less] = 0
        predict_confidence[torch.logical_not(less)] = 1
        Accuracy, Precision, Recall, F1Score = Experiment.scores(predict_confidence, target_confidence)
        predict_confidence = predicted_confidence_backup.clone()
        Ac_list = np.append(Ac_list, Accuracy.item())
        Pr_list = np.append(Pr_list, Precision.item())
        Rc_list = np.append(Rc_list, Recall.item())
        F1_list = np.append(F1_list, F1Score.item())
    fig = make_subplots(rows=1, cols=2)
    fig.add_trace(go.Scatter(x=cut_off_list, y=Ac_list, name="Accuracy"), row=1, col=1)
    fig.add_trace(go.Scatter(x=cut_off_list, y=Pr_list, name="Precision"), row=1, col=1)
    fig.add_trace(go.Scatter(x=cut_off_list, y=Rc_list, name="Recall"), row=1, col=1)
    fig.add_trace(go.Scatter(x=cut_off_list, y=F1_list, name="F1Score"), row=1, col=1)
    fig.add_trace(go.Scatter(x=Rc_list, y=Pr_list, name="Pr-Rc"), row=1, col=2)
    return fig
Example #14
 def forward(self, x, x_mask):
     '''
         x: [batch_size, s_len, hidden_dim]
         x_mask: [batch_size, s_len]
     '''
     
     out = self.pos_enc(x)  # [batch_size, s_len, hidden_dim]
     res = out
     for i, sep_conv in enumerate(self.sep_convs):
         # out = self.layer_norms[i](out)  # [batch_size, s_len, hidden_dim]
         out = sep_conv(out)  # [batch_size, s_len, hidden_dim]
         out = self.layer_norms[i](out)  # [batch_size, s_len, hidden_dim]
         out = self.dropout(F.relu(out))
         out = out + res  # update out
         res = out  # update res
         
     ## self-attn + ffn
     out = out.permute(1,0,2)  # [s_len, batch_size, hidden_dim]
     # In TransformerEncoderLayer, 
     #       src: [s_len, batch_size, hidden_dim]
     #       src_key_padding_mask: [batch_size, s_len], 1 -> pad token, 0 -> true token
     x_mask = torch.logical_not(x_mask)
     out = self.enc_layer(src=out, src_key_padding_mask=x_mask)  # [s_len, batch_size, hidden_dim]        
     out = out.permute(1,0,2)  # [batch_size, s_len, hidden_dim]
     return out  
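As a side note, the flip from a "True means real token" mask to the "True means padding" convention that torch.nn.TransformerEncoderLayer expects for src_key_padding_mask can be sketched on its own (the token ids and the pad id of 0 below are assumptions):

import torch

token_ids = torch.tensor([[5, 9, 3, 0, 0]])        # 0 assumed to be the padding id
x_mask = token_ids != 0                            # True for real tokens
src_key_padding_mask = torch.logical_not(x_mask)   # True for padded positions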
Example #15
 def fixed_learnable_downsample(self,
                                features,
                                boxes,
                                out_shape=(7, 7),
                                kernel_size=(3, 3),
                                strides=(2, 2),
                                device=None):
     # start edit 3
     #mask_omit = torch.ones((boxes.shape[0]),dtype=torch.bool,device=device)
     result_x = torch.zeros(features.size(),
                            dtype=torch.float,
                            device=device)
     result_box = torch.zeros(boxes.size(), device=device, dtype=torch.long)
     #N,c,m,n = features.shape
     features_ = features
     boxes_ = boxes
     indexes = torch.arange(boxes.shape[0])
     while indexes.shape[0] > 0:
         mask = torch.logical_and(
             self.outShape(boxes_[:, -1], strides[0],
                           kernel_size[0]) > out_shape[0],
             self.outShape(boxes_[:, -2], strides[1], kernel_size[1]) >
             out_shape[1])
         mask_not = torch.logical_not(mask)
         indexes_ = indexes[mask]
         finalized = indexes[mask_not]
         # port finalized to results
         result_x[finalized, :, :features_.shape[2], :features_.
                  shape[3]] = features_[mask_not, :, :, :]
         result_box[finalized, :] = boxes_[mask_not, :]
         features_, boxes_ = self.rcConv(features_[mask], boxes_[mask])
         indexes = indexes_
     return result_x, result_box
Example #16
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (N, ) boolean mask for N-term positions
        * CA_mask: (N, ) boolean mask for C-alpha positions
        * C_mask: (N, ) or None. boolean mask for C-term positions, or
                    automatically calculated from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
    """ 
    # detach gradients for angle calculation - mirror selection
    pred_coords_ = torch.transpose(pred_coords.detach(), -1 , -2).cpu()
    n_terms  = pred_coords_[:, N_mask.squeeze()]
    c_alphas = pred_coords_[:, CA_mask.squeeze()]
    # select c_term auto if not passed
    if C_mask is not None: 
        c_terms = pred_coords_[:, C_mask]
    else:
        c_terms = pred_coords_[:, torch.logical_not(torch.logical_or(N_mask, CA_mask)).squeeze()]
    # compute phis for every protein in the batch
    phis = [get_dihedral_torch(c_terms[i, :-1],
                               n_terms[i,  1:],
                               c_alphas[i, 1:],
                               c_terms[i,  1:]) for i in range(pred_coords.shape[0])]

    # return percentage of lower than 0
    if prop: 
        return torch.tensor( [(x<0).float().mean().item() for x in phis] ) 
    return phis
Example #17
    def interp(x):
        sw_plus_sg = x.sum(dim=1)
        sw_plus_sg_minus_swc = sw_plus_sg - swc
        _, krow, _ = swof_interpolator(sw_plus_sg, columns_dim=None)
        _, krog, _ = sgof_interpolator(sw_plus_sg_minus_swc, columns_dim=None)
        connected_water = x[:, 0] - swc < eps
        no_gas = x[:, 1] < eps
        other = torch.logical_not(no_gas + connected_water)

        sw_plus_sg_minus_swc = sw_plus_sg_minus_swc * other + torch.logical_not(
            other)
        kro_at_other = (x[:, 1] * krog +
                        (x[:, 0] - swc) * krow) / sw_plus_sg_minus_swc

        kro = krog * connected_water + krow * no_gas + kro_at_other * other
        return kro
Example #18
 def merge(self, x):
     '''
     params:
         x: (N, M, D), N speakers, M utterances per speaker, D dimension
     '''
     assert len(x.size()) == 3
     n_spks, n_utts, dimension = x.size()
     x = x.repeat(1, n_utts, 1)
     mask = torch.logical_not(torch.eye(n_utts)).repeat(
         n_spks, 1).view(-1).to(x.device)
     masked_x = x.view(-1, self.d_model)[mask].contiguous().view(
         n_spks * n_utts, -1, self.d_model)
     masked_x = masked_x.transpose(
         0, 1).contiguous()  # n_utts - 1, n_spks * n_utts, dimension
     x, _ = self.attention_transformation(
         masked_x, masked_x, masked_x,
         None)  # n_utts - 1, n_spks * n_utts, dimension
     x = x + masked_x
     if self.pooling == 'mean':
         aggregation = mean(
             x)  # (n_spks * (n_utts - 1), dimension), n_spks * (n_utts - 1)
     else:
         x = x.permute(1, 2, 0)  # n_spks * n_utts, dimension, n_utts - 1
         alpha = self.aggregation(x)
         aggregation = x.view(n_spks * n_utts, self.head,
                              self.d_model // self.head,
                              -1).matmul(alpha.unsqueeze(-1)).view(
                                  n_spks * n_utts, self.d_model)
     return aggregation
Example #19
def load_ogb(name):
    from ogb.nodeproppred import DglNodePropPredDataset

    print('load', name)
    data = DglNodePropPredDataset(name=name)
    print('finish loading', name)
    splitted_idx = data.get_idx_split()
    graph, labels = data[0]
    labels = labels[:, 0]

    graph.ndata['features'] = graph.ndata['feat']
    graph.ndata['labels'] = labels
    in_feats = graph.ndata['features'].shape[1]
    num_labels = len(th.unique(labels[th.logical_not(th.isnan(labels))]))

    # Find the node IDs in the training, validation, and test set.
    train_nid, val_nid, test_nid = splitted_idx['train'], splitted_idx[
        'valid'], splitted_idx['test']
    train_mask = th.zeros((graph.number_of_nodes(), ), dtype=th.bool)
    train_mask[train_nid] = True
    val_mask = th.zeros((graph.number_of_nodes(), ), dtype=th.bool)
    val_mask[val_nid] = True
    test_mask = th.zeros((graph.number_of_nodes(), ), dtype=th.bool)
    test_mask[test_nid] = True
    graph.ndata['train_mask'] = train_mask
    graph.ndata['val_mask'] = val_mask
    graph.ndata['test_mask'] = test_mask
    print('finish constructing', name)
    return graph, num_labels
Example #20
    def filter_valid(self, scores, labels, device):
        valid_scores = scores.reshape(-1, self.model.cfg.num_classes)
        valid_labels = labels.reshape(-1).to(device)

        ignored_bool = torch.zeros_like(valid_labels, dtype=torch.bool)
        for ign_label in self.dataset.cfg.ignored_label_inds:
            ignored_bool = torch.logical_or(ignored_bool,
                                            torch.eq(valid_labels, ign_label))

        valid_idx = torch.where(torch.logical_not(ignored_bool))[0].to(device)

        valid_scores = torch.gather(
            valid_scores, 0,
            valid_idx.unsqueeze(-1).expand(-1, self.model.cfg.num_classes))
        valid_labels = torch.gather(valid_labels, 0, valid_idx)

        # Reduce label values in the range of logit shape
        reducing_list = torch.arange(0,
                                     self.model.cfg.num_classes,
                                     dtype=torch.int64)
        inserted_value = torch.zeros([1], dtype=torch.int64)

        for ign_label in self.dataset.cfg.ignored_label_inds:
            reducing_list = torch.cat([
                reducing_list[:ign_label], inserted_value,
                reducing_list[ign_label:]
            ], 0)
        valid_labels = torch.gather(reducing_list.to(device), 0, valid_labels)

        valid_labels = valid_labels.unsqueeze(0)
        valid_scores = valid_scores.unsqueeze(0).transpose(-2, -1)

        return valid_scores, valid_labels
Example #21
def main(args):
    if not args.standalone:
        th.distributed.init_process_group(backend='gloo')

    dgl.distributed.initialize(args.ip_config, num_workers=args.num_workers)
    g = dgl.distributed.DistGraph(args.ip_config, args.graph_name, part_config=args.conf_path)
    print('rank:', g.rank())

    pb = g.get_partition_book()
    train_nid = dgl.distributed.node_split(g.ndata['train_mask'], pb, force_even=True)
    val_nid = dgl.distributed.node_split(g.ndata['val_mask'], pb, force_even=True)
    test_nid = dgl.distributed.node_split(g.ndata['test_mask'], pb, force_even=True)
    local_nid = pb.partid2nids(pb.partid).detach().numpy()
    print('part {}, train: {} (local: {}), val: {} (local: {}), test: {} (local: {})'.format(
        g.rank(), len(train_nid), len(np.intersect1d(train_nid.numpy(), local_nid)),
        len(val_nid), len(np.intersect1d(val_nid.numpy(), local_nid)),
        len(test_nid), len(np.intersect1d(test_nid.numpy(), local_nid))))
    device = th.device('cpu')
    labels = g.ndata['labels'][np.arange(g.number_of_nodes())]
    n_classes = len(th.unique(labels[th.logical_not(th.isnan(labels))]))
    print('#labels:', n_classes)

    # Pack data
    in_feats = g.ndata['features'].shape[1]
    data = train_nid, val_nid, test_nid, in_feats, n_classes, g
    run(args, device, data)
    print("parent ends")
Example #22
    def perturb(self, x, y=None, **kwargs):
        predictions = self.undefended_model(x)

        assert len(predictions) == len(x)

        num_classes = predictions.shape[1]

        predicted_labels = torch.argmax(predictions, axis=-1)

        target_labels = []

        for i, true_label in enumerate(predicted_labels):
            target_label = None

            while target_label is None or target_label == true_label:
                target_label = torch.randint(0, num_classes, (1, ))

            target_labels.append(target_label)

        target_labels = torch.cat(target_labels).to(predicted_labels.device)

        assert torch.all(
            torch.logical_not(torch.eq(predicted_labels, target_labels)))

        return self.attack_on_detector_classifier.perturb(x,
                                                          y=target_labels,
                                                          **kwargs).detach()
Example #23
    def forward(self, tensor_list: NestedTensor):
        xs = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, x in xs.items():

            if 'layer' + name not in self.return_layers:
                continue

            #print(name, ", ", x.shape)
            m = tensor_list.mask
            assert m is not None
            mask = F.interpolate(m[None].float(),
                                 size=x.shape[-2:]).to(torch.bool)[0]

            # TODO: workaround to avoid NaN of attention calculation because of a full "True" mask
            invalid_indices = (torch.logical_not(mask).sum(
                dim=[1, 2]) == 0).nonzero().squeeze(-1)
            if (len(invalid_indices)):
                #print("workaround to avoid NaN for {}".format(invalid_indices))
                mask[invalid_indices] = torch.zeros(x.shape[-2:],
                                                    dtype=torch.bool,
                                                    device=mask.device)

            out[name] = NestedTensor(x, mask)
        return out, xs
Example #24
    def test_permute(self):
        def rows_equal(tensor_1, tensor_2, mask=None):
            if mask is None:
                mask = torch.ones(tensor_1.shape[0], dtype=torch.bool)
            is_equal = []
            for i, do_check in enumerate(mask):
                if do_check:
                    is_equal.append(torch.all(tensor_1[i,:] == tensor_2[i,:]))
            return torch.tensor(is_equal)

        is_permuted, x = permute(self.indexes, prop=0)
        npt.assert_array_equal(is_permuted, torch.zeros(self.batches, dtype=torch.bool))
        self.assertTrue(torch.all(rows_equal(x, self.indexes)))
        npt.assert_array_equal(x, self.indexes)  # this does the same thing, but tests rows_equal

        is_permuted, x = permute(self.indexes, prop=1.)
        npt.assert_array_equal(is_permuted, torch.ones(self.batches, dtype=torch.bool))
        self.assertFalse(torch.any(rows_equal(x, self.indexes)))

        is_permuted, x = permute(self.indexes, prop=0.5)
        self.assertTrue(torch.sum(is_permuted), self.batches // 2)
        is_not_permuted = torch.logical_not(is_permuted)
        self.assertTrue(torch.all(rows_equal(x, self.indexes, mask=is_not_permuted)))
        same = torch.masked_select(x.permute(1, 0), is_not_permuted)
        ref = torch.masked_select(self.indexes.permute(1, 0), is_not_permuted)
        npt.assert_array_equal(same, ref)  # double check the above line
        self.assertFalse(torch.any(rows_equal(x, self.indexes, mask=is_permuted)))
Example #25
def CowMix(X, target, model):
    """
  Mezcla las imagenes dentro de un mismo lote usando un mascara de formas
  irregulares.        
  """
    B, _, H, W = X.shape

    # Proportion of pixels that are replaced
    p = torch.rand(B, 1, 1, 1, device=X.device)

    # Size of the mask patches
    r = torch.randint(4, 16, (1, ), device=X.device)

    mask = torch.randn(B, 1, r, r, device=X.device)
    mask = F.interpolate(mask,
                         size=(H, W),
                         mode="bilinear",
                         align_corners=False)

    mean = mask.mean(dim=(1, 2, 3), keepdim=True)
    std = mask.std(dim=(1, 2, 3), keepdim=True)
    tao = mean + 1.4 * std * torch.erfinv(2 * p - 1)

    mask = mask > tao

    idx = torch.randperm(B)
    X = mask * X + torch.logical_not(mask) * X[idx]
    out = model(X)
    p = p.squeeze()
    loss = ((1 - p) * F.cross_entropy(out, target, reduction='none') +
            p * F.cross_entropy(out, target[idx], reduction='none'))
    return loss.mean(), out
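The mixing line `X = mask * X + torch.logical_not(mask) * X[idx]` can be illustrated with two tiny tensors (shapes and values chosen only for the sketch):

import torch

a = torch.zeros(1, 1, 2, 2)
b = torch.ones(1, 1, 2, 2)
mask = torch.tensor([[[[True, False], [False, True]]]])
mixed = mask * a + torch.logical_not(mask) * b
# mixed takes pixels from `a` where mask is True and from `b` elsewhere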
Example #26
 def forward(ctx, x: torch.Tensor, spike: torch.Tensor):
     # y = x * spike
     # multiplying x by spike is equivalent to filling the positions of x where spike == 0 with 0
     assert x.shape == spike.shape, 'x.shape != spike.shape'  # shapes must match exactly (no broadcasting)
     if spike.dtype == torch.bool:
         mask = torch.logical_not(spike)
     else:
         mask = torch.logical_not(spike.bool())
     if x.requires_grad and spike.requires_grad:
         ctx.save_for_backward(mask, x)
     elif x.requires_grad and not spike.requires_grad:
         ctx.save_for_backward(mask)
     elif not x.requires_grad and spike.requires_grad:
         ctx.save_for_backward(x)
     return x.masked_fill(mask, 0)
Example #27
def get_mappings(iou_mat):
    mappings = torch.zeros_like(iou_mat)
    gt_count, pr_count = iou_mat.shape
    
    #first mapping (max iou for first pred_box)
    if not iou_mat[:,0].eq(0.).all():
        # if not a zero column
        mappings[iou_mat[:,0].argsort()[-1],0] = 1

    for pr_idx in range(1,pr_count):
        # Sum of all the previous mapping columns will let 
        # us know which gt-boxes are already assigned
        not_assigned = torch.logical_not(mappings[:,:pr_idx].sum(1)).long()

        # Considering unassigned gt-boxes for further evaluation 
        targets = not_assigned * iou_mat[:,pr_idx]

        # If no gt-box satisfy the previous conditions
        # for the current pred-box, ignore it (False Positive)
        if targets.eq(0).all():
            continue

        # max-iou from current column after all the filtering
        # will be the pivot element for mapping
        pivot = targets.argsort()[-1]
        mappings[pivot,pr_idx] = 1
    return mappings
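A toy call with a hand-made 2 (ground-truth) x 3 (prediction) IoU matrix, purely for illustration:

import torch

iou_mat = torch.tensor([[0.8, 0.1, 0.0],
                        [0.2, 0.6, 0.0]])
print(get_mappings(iou_mat))
# tensor([[1., 0., 0.],
#         [0., 1., 0.]])  each prediction is greedily matched to a still-unassigned gt box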
Example #28
 def build_depot_mask(self):
     a = torch.arange(self.n_depot,
                      device=self.device).reshape(1, 1, -1).repeat(
                          self.batch, self.n_car, 1)
     b = self.car_start_node[:, :, None].repeat(1, 1, self.n_depot)
     depot_one_hot = (a == b).bool()  #.long()
     return depot_one_hot, torch.logical_not(depot_one_hot)
Example #29
    def _filter_predictions_velocity(self, X, y, var):
        """
        :param X: Nx3 position
        :param y: N values
        :param var: N variance values
        :return: thresholded X, y, var
        """

        # Filter -1 to 1
        min_filterout = X.max(dim=-1).values >= 1
        max_filterout = X.min(dim=-1).values <= -1
        mask = torch.logical_not(torch.logical_or(min_filterout, max_filterout))
        X = X[mask, :]
        y = y[mask, :]
        var = var[mask, :]

        if len(self.surface_threshold) == 1:
            mask = y.squeeze() >= self.surface_threshold[0]
        else:
            min_mask = y.squeeze() >= self.surface_threshold[0]
            max_mask = y.squeeze() <= self.surface_threshold[1]
            mask = torch.logical_and(min_mask, max_mask)

        X = X[mask, :]
        y = y[mask, :]
        var = var[mask, :]

        var_mask = var.squeeze(-1) <= self.variance_threshold
        X = X[var_mask, :]
        y = y[var_mask, :]
        var = var[var_mask, :]

        return X, y, var
Example #30
def _get_triplet_mask(labels):
    """Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
    A triplet (i, j, k) is valid if:
        - i, j, k are distinct
        - labels[i] == labels[j] and labels[i] != labels[k]
    Args:
        labels: integer `torch.Tensor` with shape [batch_size]
    """
    # Check that i, j and k are distinct
    indices_equal = torch.eye(labels.shape[0], device=DEVICE).bool()
    indices_not_equal = indices_equal.logical_not()
    i_not_equal_j = indices_not_equal.unsqueeze(2)
    i_not_equal_k = indices_not_equal.unsqueeze(1)
    j_not_equal_k = indices_not_equal.unsqueeze(0)

    distinct_indices = torch.logical_and(torch.logical_and(i_not_equal_j, i_not_equal_k), j_not_equal_k)

    # Check if labels[i] == labels[j] and labels[i] != labels[k]
    label_equal = torch.eq(labels.unsqueeze(0), labels.unsqueeze(1))
    i_equal_j = label_equal.unsqueeze(2)
    i_equal_k = label_equal.unsqueeze(1)

    valid_labels = torch.logical_and(i_equal_j, torch.logical_not(i_equal_k))

    # Combine the two masks
    mask = torch.logical_and(distinct_indices, valid_labels)

    return mask
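A quick sanity check with a tiny label vector; DEVICE is assumed to be defined in the surrounding module, so the sketch sets it explicitly:

import torch

DEVICE = 'cpu'  # assumption for this sketch
labels = torch.tensor([0, 0, 1])
mask = _get_triplet_mask(labels)
print(mask[0, 1, 2])  # tensor(True): (anchor 0, positive 1, negative 2) is a valid triplet
print(mask[0, 2, 1])  # tensor(False): labels[0] != labels[2], so index 2 cannot be the positive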