Example #1
File: math_ops.py Project: malfet/pytorch
    def comparison_ops(self):
        a = torch.randn(4)
        b = torch.randn(4)
        return (
            torch.allclose(a, b),
            torch.argsort(a),
            torch.eq(a, b),
            torch.equal(a, b),
            torch.ge(a, b),
            torch.greater_equal(a, b),
            torch.gt(a, b),
            torch.greater(a, b),
            torch.isclose(a, b),
            torch.isfinite(a),
            torch.isin(a, b),
            torch.isinf(a),
            torch.isposinf(a),
            torch.isneginf(a),
            torch.isnan(a),
            torch.isreal(a),
            torch.kthvalue(a, 1),
            torch.le(a, b),
            torch.less_equal(a, b),
            torch.lt(a, b),
            torch.less(a, b),
            torch.maximum(a, b),
            torch.minimum(a, b),
            torch.fmax(a, b),
            torch.fmin(a, b),
            torch.ne(a, b),
            torch.not_equal(a, b),
            torch.sort(a),
            torch.topk(a, 1),
            torch.msort(a),
        )
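Note: per the PyTorch docs, torch.less_equal is an alias of torch.le; it computes an element-wise a <= b with broadcasting and returns a boolean tensor. A minimal sketch:

import torch

a = torch.tensor([1.0, 2.0, 3.0])
b = torch.tensor([2.0, 2.0, 2.0])

print(torch.less_equal(a, b))  # tensor([ True,  True, False])
print(torch.le(a, b))          # same result: less_equal is an alias of le
print(a <= b)                  # the operator form dispatches to the same op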
Example #2
    def create_attention_mask(self, bs, seq_len, windows, block_length,
                              attention_mask):
        ticker = torch.arange(seq_len)[None, :]
        b_t = ticker.reshape(1, windows, block_length)

        bq_t = b_t
        bq_k = self.look_around(b_t, block_length, self.window_size)

        # compute attn mask
        # this matches the original implementation in mesh-tensorflow
        # https://github.com/tensorflow/mesh/blob/8bd599a21bad01cef1300a8735c17306ce35db6e/mesh_tensorflow/transformer/attention.py#L805
        relative_position = bq_k.unsqueeze(-2) - bq_t.unsqueeze(-1)
        relative_position = relative_position.transpose(-1, -2)

        sequence_id = torch.ones(bs, seq_len)
        q_seq = sequence_id.reshape(-1, windows, block_length)
        m_seq = sequence_id.reshape(-1, windows, block_length)
        m_seq = self.look_around(m_seq, block_length, self.window_size)

        if attention_mask is not None:
            attention_mask = attention_mask.to(m_seq.device)
            attention_mask = attention_mask.reshape(-1, windows, block_length)
            attention_mask = self.look_around(attention_mask, block_length,
                                              self.window_size)
            m_seq *= attention_mask

        visible = torch.eq(q_seq.unsqueeze(-1),
                           m_seq.unsqueeze(-2)).transpose(-1, -2)
        visible = torch.logical_and(
            visible, torch.gt(relative_position, -self.window_size))
        mask = torch.logical_and(visible, torch.less_equal(
            relative_position, 0)).transpose(-1, -2).unsqueeze(2)
        return mask
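The mask logic above reduces to two comparisons on relative positions: a key is visible to a query only if it is at most window_size steps in the past (torch.gt) and never in the future (torch.less_equal). A minimal standalone sketch with toy sizes (block_length=3, window_size=3 are illustrative values, not the model's configuration):

import torch

window_size = 3
bq_t = torch.arange(3).view(1, 1, 3)      # query positions within one block
bq_k = torch.arange(-3, 3).view(1, 1, 6)  # keys: look-around window + block

# (1, 1, 3, 6): queries along dim -2, keys along dim -1
relative_position = bq_k.unsqueeze(-2) - bq_t.unsqueeze(-1)

visible = torch.logical_and(
    torch.gt(relative_position, -window_size),  # not too far in the past
    torch.less_equal(relative_position, 0),     # never in the future
)
print(visible.squeeze())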
Example #3
def decide_with_nms(model: Experiment, dataloader: DataLoader):
    for i, data in enumerate(dataloader):
        _, raman = data
        predicted_spectrum = model(data)
        predicted_confidence = predicted_spectrum[:, :, :, 0]  # (batch_size, 25, 2)
        predicted_confidence = predicted_confidence.view((-1, 50))  # (batch_size, 50)
        predicted_confidence_bak = predicted_confidence.clone()
        target_confidence = raman[:, :, 0]  # (batch_size, 25)
        Ac_list = np.array([])
        Pr_list = np.array([])
        Rc_list = np.array([])
        F1_list = np.array([])
        for top_k in range(1, 51):
            # argsort of argsort gives each element's rank, so rank <= top_k - 1
            # marks exactly the top_k most confident entries per row.
            ranks = torch.argsort(
                torch.argsort(predicted_confidence, dim=1, descending=True), dim=1)
            predicted_confidence = torch.less_equal(ranks, top_k - 1).float().view(
                (-1, 25, 2)).sum(dim=-1).sgn()  # (batch_size, 25)
            Accuracy, Precision, Recall, F1Score = Experiment.scores(
                predicted_confidence, target_confidence)
            Ac_list = np.append(Ac_list, Accuracy.item())
            Pr_list = np.append(Pr_list, Precision.item())
            Rc_list = np.append(Rc_list, Recall.item())
            F1_list = np.append(F1_list, F1Score.item())
            predicted_confidence = predicted_confidence_bak.clone()
    fig = make_subplots(rows=1, cols=2)
    top_k_list = list(range(1, 51))
    fig.add_trace(go.Scatter(x=top_k_list, y=Ac_list, name="Accuracy"), row=1, col=1)
    fig.add_trace(go.Scatter(x=top_k_list, y=Pr_list, name="Precision"), row=1, col=1)
    fig.add_trace(go.Scatter(x=top_k_list, y=Rc_list, name="Recall"), row=1, col=1)
    fig.add_trace(go.Scatter(x=top_k_list, y=F1_list, name="F1Score"), row=1, col=1)
    fig.add_trace(go.Scatter(x=Rc_list, y=Pr_list, name="Pr-Rc"), row=1, col=2)
    return fig
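The double argsort used above is the standard rank trick: applying argsort twice yields each element's rank under a descending sort, so torch.less_equal(ranks, k - 1) selects exactly the k largest entries per row. A minimal sketch:

import torch

x = torch.tensor([[0.1, 0.9, 0.4, 0.7]])
ranks = torch.argsort(torch.argsort(x, dim=1, descending=True), dim=1)
print(ranks)                       # tensor([[3, 0, 2, 1]])
print(torch.less_equal(ranks, 1))  # top-2 mask: tensor([[False,  True, False,  True]])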
Example #4
def decide_with_cut_off(model: Experiment, dataloader: DataLoader):
    for i, data in enumerate(dataloader):
        _, raman = data
        predicted_spectrum = model(data)
        predicted_confidence = predicted_spectrum[:, :, :, 0]
        predicted_confidence_backup, _ = torch.max(predicted_confidence, dim=2)
        predict_confidence = predicted_confidence_backup.clone()
        target_confidence = raman[:, :, 0]
    cut_off_list = np.linspace(0.2, 0.8, 60)
    Ac_list = np.array([])
    Pr_list = np.array([])
    Rc_list = np.array([])
    F1_list = np.array([])
    for cut_off in cut_off_list:
        less = torch.less_equal(predict_confidence, cut_off)
        predict_confidence[less] = 0
        predict_confidence[torch.logical_not(less)] = 1
        Accuracy, Precision, Recall, F1Score = Experiment.scores(
            predict_confidence, target_confidence)
        predict_confidence = predicted_confidence_backup.clone()
        Ac_list = np.append(Ac_list, Accuracy.item())
        Pr_list = np.append(Pr_list, Precision.item())
        Rc_list = np.append(Rc_list, Recall.item())
        F1_list = np.append(F1_list, F1Score.item())
    fig = make_subplots(rows=1, cols=2)
    fig.add_trace(go.Scatter(x=cut_off_list, y=Ac_list, name="Accuracy"), row=1, col=1)
    fig.add_trace(go.Scatter(x=cut_off_list, y=Pr_list, name="Precision"), row=1, col=1)
    fig.add_trace(go.Scatter(x=cut_off_list, y=Rc_list, name="Recall"), row=1, col=1)
    fig.add_trace(go.Scatter(x=cut_off_list, y=F1_list, name="F1Score"), row=1, col=1)
    fig.add_trace(go.Scatter(x=Rc_list, y=Pr_list, name="Pr-Rc"), row=1, col=2)
    return fig
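The in-place binarization above (zeroing entries where torch.less_equal holds, setting the rest to one) can also be expressed without index assignment, e.g. via torch.where; a minimal sketch, not the author's code:

import torch

conf = torch.tensor([0.1, 0.55, 0.8])
cut_off = 0.5

less = torch.less_equal(conf, cut_off)
print(torch.where(less, torch.zeros_like(conf), torch.ones_like(conf)))  # tensor([0., 1., 1.])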
Example #5
File: test_ssim.py Project: akamaus/piq
def test_ssim_measure_is_less_or_equal_to_one(
        ones_zeros_4d_5d: Tuple[torch.Tensor,
                                torch.Tensor], device: str) -> None:
    # Create two maximally different tensors.
    ones = ones_zeros_4d_5d[0].to(device)
    zeros = ones_zeros_4d_5d[1].to(device)
    measure = ssim(ones, zeros, data_range=1., reduction='none')
    assert torch.less_equal(measure,
                            1).all(), f'SSIM must be <= 1, got {measure}'
Example #6
def NMS_or_not(predicted, nms: bool = False, cut_off: float = 0.5):
    predicted = predicted[0]
    predicted_confidence = predicted[:, :, 0]  # (25,2)
    predicted_position = predicted[:, :, 1]  # (25,2)
    predicted_position = absolute_position(predicted_position)  # (25,2)
    predicted_confidence, selected_worker = torch.max(predicted_confidence,
                                                      dim=-1)  # (25,)
    # predicted_position[i, 0] = predicted_position[i, selected_worker[i, 0]]
    selected_worker = selected_worker.view((-1, 1))
    predicted_position = torch.gather(predicted_position, 1,
                                      selected_worker).flatten()
    less = torch.less_equal(predicted_confidence, cut_off)
    more = torch.logical_not(less)
    predicted_confidence_round = predicted_confidence.clone()
    predicted_confidence_round[less] = 0
    predicted_confidence_round[more] = 1
    return predicted_confidence, predicted_position, predicted_confidence_round
Example #7
    def forward(self):
        a = torch.tensor(0)
        b = torch.tensor(1)
        # len() takes a single argument, so collect the results in a tuple.
        return len((
            torch.allclose(a, b),
            torch.argsort(a),
            torch.eq(a, b),
            torch.eq(a, 1),
            torch.equal(a, b),
            torch.ge(a, b),
            torch.ge(a, 1),
            torch.greater_equal(a, b),
            torch.greater_equal(a, 1),
            torch.gt(a, b),
            torch.gt(a, 1),
            torch.greater(a, b),
            torch.isclose(a, b),
            torch.isfinite(a),
            torch.isin(a, b),
            torch.isinf(a),
            torch.isposinf(a),
            torch.isneginf(a),
            torch.isnan(a),
            torch.isreal(a),
            torch.kthvalue(a, 1),
            torch.le(a, b),
            torch.le(a, 1),
            torch.less_equal(a, b),
            torch.lt(a, b),
            torch.lt(a, 1),
            torch.less(a, b),
            torch.maximum(a, b),
            torch.minimum(a, b),
            torch.fmax(a, b),
            torch.fmin(a, b),
            torch.ne(a, b),
            torch.ne(a, 1),
            torch.not_equal(a, b),
            torch.sort(a),
            torch.topk(a, 1),
            torch.msort(a),
        ))
Example #8
    def __le__(self, other):
        x0, x1 = self._to_binary_tensor_args(other)
        y = torch.less_equal(x0._t, x1._t)
        s = _ox.less_equal(*_EagerTensor.ox_args([x0, x1]))
        return self.from_torch(y, s)
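Example #8 binds Python's <= operator to torch.less_equal while also recording the matching ONNX node through _ox. A stripped-down sketch of the same dunder pattern, using a hypothetical Wrapped class in place of _EagerTensor and omitting the ONNX bookkeeping:

import torch

class Wrapped:
    """Hypothetical minimal wrapper holding a tensor in ._t."""
    def __init__(self, t):
        self._t = t

    def __le__(self, other):
        other_t = other._t if isinstance(other, Wrapped) else torch.as_tensor(other)
        return Wrapped(torch.less_equal(self._t, other_t))

a = Wrapped(torch.tensor([1, 2, 3]))
b = Wrapped(torch.tensor([2, 2, 2]))
print((a <= b)._t)  # tensor([ True,  True, False])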
Example #9
    def __mask_assign_targets_anchors_torch(
            self, batch_points, batch_anchors_3d, batch_gt_boxes_3d,
            batch_gt_labels, minibatch_size, positive_rate, pos_iou, neg_iou,
            effective_sample_range, valid_mask):
        """ Mask assign targets function
        batch_points: [bs, points_num, 3]
        batch_anchors_3d: [bs, points_num, cls_num, 7]
        batch_gt_boxes_3d: [bs, gt_num, 7]
        batch_gt_labels: [bs, gt_num]
        valid_mask: [bs, points_num, cls_num]

        return:
            assigned_idx: [bs, points_num, cls_num], int32, the index of groundtruth
            assigned_pmask: [bs, points_num, cls_num], float32
            assigned_nmask: [bs, points_num, cls_num], float32
        """
        bs, pts_num, cls_num, _ = batch_anchors_3d.shape

        positive_size = int(minibatch_size * positive_rate)

        batch_assigned_idx = torch.zeros(
            [bs, pts_num, cls_num]).long().to(batch_points.device)
        batch_assigned_pmask = torch.zeros(
            [bs, pts_num, cls_num]).float().to(batch_points.device)
        batch_assigned_nmask = torch.zeros(
            [bs, pts_num, cls_num]).float().to(batch_points.device)

        for i in range(bs):
            cur_points = batch_points[i]
            cur_anchors_3d = batch_anchors_3d[i]  # [pts_num, cls_num, 3/7]
            cur_valid_mask = valid_mask[i]  # [pts_num, cls_num]

            # gt_num
            cur_gt_labels = batch_gt_labels[i]  # [gt_num]
            cur_gt_boxes_3d = batch_gt_boxes_3d[i]  # [gt_num, 7]

            # first filter gt_boxes
            filter_idx = torch.where(
                torch.any(torch.not_equal(cur_gt_boxes_3d, 0),
                          dim=-1))[0].to(cur_gt_labels.device)
            cur_gt_labels = cur_gt_labels[filter_idx]
            cur_gt_boxes_3d = cur_gt_boxes_3d[filter_idx]

            cur_points_numpy = cur_points.cpu().detach().numpy()
            cur_gt_boxes_3d_numpy = cur_gt_boxes_3d.cpu().detach().numpy()

            points_mask_numpy = check_inside_points(
                cur_points_numpy, cur_gt_boxes_3d_numpy)  # [pts_num, gt_num]
            points_mask = torch.from_numpy(points_mask_numpy).int().to(
                cur_points.device)
            sampled_gt_idx_numpy = np.argmax(points_mask_numpy, axis=-1)
            sampled_gt_idx = torch.from_numpy(sampled_gt_idx_numpy).long().to(
                cur_points.device)  # [pts_num]
            # used for label_mask
            assigned_gt_label = cur_gt_labels[sampled_gt_idx]  # [pts_num]
            assigned_gt_label = assigned_gt_label - 1  # 1... -> 0...
            # used for dist_mask
            assigned_gt_boxes = cur_gt_boxes_3d[sampled_gt_idx]  # [pts_num, 7]
            # then calc the distance between anchors and assigned_boxes
            # dist = cur_anchors_3d[:, :, :3] - assigned_gt_boxes[:, 0:3].unsqueeze(dim=1).repeat((1, cur_anchors_3d.shape[1], 1))
            # dist = torch.sqrt(torch.sum(dist * dist, dim=-1))
            dist = torch.linalg.norm(
                cur_anchors_3d[:, :, :3] -
                assigned_gt_boxes[:, 0:3].unsqueeze(dim=1).repeat(
                    (1, cur_anchors_3d.shape[1], 1)),
                dim=-1)

            filtered_assigned_idx = filter_idx[sampled_gt_idx]  # [pts_num]
            filtered_assigned_idx = filtered_assigned_idx.view(
                pts_num, 1).repeat((1, cls_num))
            batch_assigned_idx[i] = filtered_assigned_idx

            # then we generate pos/neg mask
            if cls_num == 1:  # anchor_free
                label_mask = torch.ones(
                    (pts_num, cls_num)).float().to(points_mask.device)
            else:  # multiple anchors
                label_mask = np.tile(
                    np.reshape(np.arange(cls_num), [1, cls_num]), [pts_num, 1])
                label_mask = np.equal(label_mask,
                                      assigned_gt_label[:, np.newaxis]).astype(
                                          np.float32)

            pmask = torch.max(points_mask, dim=1)[0] > 0
            dist_mask = torch.less_equal(
                dist, effective_sample_range)  # pts_num, cls_num
            pmask = torch.logical_and(pmask.unsqueeze(-1), dist_mask)
            pmask = pmask.float() * label_mask
            pmask = pmask * cur_valid_mask

            nmask = torch.max(points_mask, dim=1)[0] == 0
            nmask = nmask.view(pts_num, 1).repeat((1, cls_num))
            nmask = nmask.float() * label_mask
            nmask = nmask * cur_valid_mask

            # then randomly sample
            if minibatch_size != -1:
                pts_pmask = np.any(pmask, axis=1)  # pts_num
                pts_nmask = np.any(nmask, axis=1)  # [pts_num]

                positive_inds = np.where(pts_pmask)[0]
                cur_positive_num = np.minimum(len(positive_inds),
                                              positive_size)
                if cur_positive_num > 0:
                    positive_inds = np.random.choice(positive_inds,
                                                     cur_positive_num,
                                                     replace=False)
                pts_pmask = np.zeros_like(pts_pmask)
                pts_pmask[positive_inds] = 1

                cur_negative_num = minibatch_size - cur_positive_num
                negative_inds = np.where(pts_nmask)[0]
                cur_negative_num = np.minimum(len(negative_inds),
                                              cur_negative_num)
                if cur_negative_num > 0:
                    negative_inds = np.random.choice(negative_inds,
                                                     cur_negative_num,
                                                     replace=False)
                pts_nmask = np.zeros_like(pts_nmask)
                pts_nmask[negative_inds] = 1

                pmask = pmask * pts_pmask[:, np.newaxis]
                nmask = nmask * pts_nmask[:, np.newaxis]

            batch_assigned_pmask[i] = pmask
            batch_assigned_nmask[i] = nmask
        return batch_assigned_idx, batch_assigned_pmask, batch_assigned_nmask
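In the assignment routine above, torch.less_equal converts per-anchor distances into the positive-sample distance mask against effective_sample_range. A minimal sketch with toy numbers (illustrative only):

import torch

dist = torch.tensor([[0.5, 2.0],
                     [3.1, 0.9]])  # [pts_num, cls_num] anchor-to-box distances
effective_sample_range = 1.0

dist_mask = torch.less_equal(dist, effective_sample_range)
print(dist_mask.float())  # tensor([[1., 0.], [0., 1.]])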