Code example #1
import torch
from torch_geometric.utils import degree

def compute_deg_hist(ds):
    max_deg = max([
        max(degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long)).item()
        for data in ds
    ])
    deg = torch.zeros(max_deg + 1, dtype=torch.long)
    for data in ds:
        d = degree(data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long)
        deg += torch.bincount(d, minlength=deg.numel())
    return deg
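A minimal usage sketch (not part of the snippet above; the dataset below is a placeholder, here a torch_geometric TUDataset). The resulting histogram is the kind of degree tensor that degree-aware layers such as PNAConv accept through their deg argument.

from torch_geometric.datasets import TUDataset

dataset = TUDataset(root="/tmp/MUTAG", name="MUTAG")   # any iterable of Data objects works
deg = compute_deg_hist(dataset)                        # deg[d] == number of nodes with in-degree d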
Code example #2
File: util.py  Project: jimeffry/bisenet-pytorch
def fast_hist(a, b, n):
    '''
    a and b are the prediction and the ground-truth mask, respectively
    n is the number of classes
    '''
    # k = (a >= 0) & (a < n)
    # return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
    a = a.flatten()
    b = b.flatten()
    return torch.bincount(n * a.int() + b.int(), minlength=n**2).view(n, n)
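The flattening trick above (and in several confusion-matrix snippets below) is easiest to see on a toy example; a small hedged sketch with made-up tensors:

import torch

n = 3
a = torch.tensor([0, 1, 2, 1])    # first label tensor (the prediction in fast_hist)
b = torch.tensor([0, 2, 2, 1])    # second label tensor (the mask in fast_hist)
hist = torch.bincount(n * a + b, minlength=n ** 2).view(n, n)
# hist[i, j] counts positions where a == i and b == j,
# because each pair (i, j) is encoded as the single bin index n*i + j.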
Code example #3
File: seg_utils.py  Project: templeblock/hyperseg
 def update(self, a, b):
     with torch.no_grad():
         n = self.num_classes
         if self.mat is None:
             self.mat = torch.zeros((n, n),
                                    dtype=torch.int64,
                                    device=a.device)
         k = (a >= 0) & (a < n)
         inds = n * a[k].to(torch.int64) + b[k]
         self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
Code example #4
    def save(x, y, dir):
        for i, (x_i, y_i) in enumerate(zip(x, y)):
            plt.imshow(x_i)
            plt.axis("off")
            y_encoded = torch.bincount(torch.tensor(y_i), minlength=10)
            plt.title("Label: {}\nEncoded: {}".format(y_i, y_encoded))
            plt.savefig(os.path.join(dir, "%s.png" % i))

            if i > 10:
                break
Code example #5
File: utils.py  Project: d3m0n-r00t/ASL
def iIoU_class(pred, target, num_classes, verbose=False):
	"""[summary]
	It is well-known that the global IoU measure is biased toward object instances that cover a large image area. 
	In street scenes with their strong scale variation this can be problematic. 
	Specifically for traffic participants, which are the key classes in our scenario, 
	we aim to evaluate how well the individual instances in the scene are represented in the labeling. 
	To address this, we additionally evaluate the semantic labeling using an
	instance-level intersection-over-union metric iIoU = iTP / (iTP + FP + iFN).
	Again iTP, FP, and iFN denote the numbers of true positive, false positive, and false negative pixels, respectively. 
	However, in contrast to the standard IoU measure, iTP and iFN are computed by weighting the contribution of each pixel by the ratio of the class’ 
	average instance size to the size of the respective ground truth instance.
	It is important to note here that unlike the instance-level task below, 
	we assume that the methods only yield a standard per-pixel semantic class labeling as output. 
	Therefore, the false positive pixels are not associated with any instance and thus do not require normalization. 
	The final scores, iIoUcategory and iIoUclass, are obtained as the means for the two semantic granularities.


	Parameters
	----------
	pred : [torch.tensor]
			BSxD1xD2xD3, predicted class for each pixel. No need to predict the -1 class! Elements in 0..(num_classes-1).
	target : [torch.tensor]
			BSxD1xD2xD3, -1 for the VOID pixels that should not induce an error! Elements in -1..(num_classes-1).
	num_classes : [int]
			The invalid class does not count as a class, so if targets take values -1..19 you have 20 classes.
	"""
	
	BS = pred.shape[0]
	# add 1 so the index ranges from 0 to NUM_CLASSES
	pred = pred.type(torch.int) + 1
	target = target.type(torch.int) + 1
	# NOW class=0 should not induce a loss

	# Set pixels that are predicted but have no label available to 0. These pixels don't induce a loss.
	# Neither does the IoU of class 0, nor do these pixels count toward the UNION of the other classes if predicted wrong.
	pred = pred * (target > 0).type(pred.dtype) 
	iou_per_image = torch.zeros( (BS), device=pred.device)
	
	# we have to do this calculation for each image. 
	for b in range(BS):
		weight = torch.bincount(target[b].flatten())[1:] 
		weight = weight/ weight.sum()
		w = torch.zeros( (num_classes), device=target.device, dtype=weight.dtype )
		w[:weight.shape[0]] = weight
		TPS, FPS, TNS, FNS, _ = stat_scores_multiple_classes(pred[b], target[b], num_classes+1)
		if verbose: 
			print(f'TPS:{TPS}, \nFPS:{FPS}, \nFNS:{FNS}, \nTNS:{TNS}')
			print(F'Inter: {TPS},\nUnion: {TPS+FPS+FNS}')
		IoU = (TPS[1:]*w) / ((TPS[1:]*w) + FPS[1:] + (FNS[1:]*w) )

		mIoU = (IoU[torch.isnan(IoU)==False]).mean()
		iou_per_image[b] = mIoU
		
	return torch.mean( iou_per_image ) #returns mean over batch
Code example #6
def test(args, model, device, test_loader, criterion):
    # Switch model to evaluation mode. This is necessary for layers like dropout, batchnorm etc which behave differently in training and evaluation mode
    model.eval()
    test_loss = 0
    total = 0
    correct = 0
    best_loss = 1
    num_classes = 23
    class_correct = list(0. for i in range(num_classes))
    class_total = list(0. for i in range(num_classes))
    example_images = []
    with torch.no_grad():
        for data, target in test_loader:
            # Load the input features and labels from the test dataset
            data, target = data.to(device), target.to(device)

            # Make predictions: pass image data from the test dataset and predict the class each image belongs to (0-22 in this case)
            output = model(data)

            # Compute the loss sum up batch loss
            test_loss += criterion(output, target).item()
            #test_loss += F.nll_loss(output, target, reduction='sum').item()

            # Get the index of the max log-probability
            pred = output.max(1, keepdim=True)[1][:,0]
            per_class_count = torch.bincount(target, minlength=num_classes)
            #correct += pred.eq(target.view_as(pred)).sum().item()
            for i in range(num_classes):
                class_total[i] += per_class_count[i]
                mask = target == i
                p = pred[mask]
                t = target[mask]
                class_correct[i] += p.eq(t.view_as(p)).sum().item()
            total += pred.eq(pred).sum().item()
            correct += pred.eq(target.view_as(pred)).sum().item()

            # WandB  Log images in your test dataset automatically, along with predicted and true labels by passing pytorch tensors with image data into wandb.Image
            example_images.append(wandb.Image(
                data[0], caption="Pred: {} Truth: {}".format(pred[0].item(), target[0])))

    # WandB  wandb.log(a_dict) logs the keys and values of the dictionary passed in and associates the values with a step.
    # You can log anything by passing it to wandb.log, including histograms, custom matplotlib objects, images, video, text, tables, html, pointclouds and other 3D objects.
    # Here we use it to log test accuracy, loss and some test images (along with their true and predicted labels).
    #wandb.log({
    #    "Examples": example_images,
    #    "Test Accuracy": 100. * correct / len(test_loader.dataset),
    #    "Test Loss": test_loss})
    log_dict = {
        "Examples": example_images,
        "Test Accuracy": 100. * correct / len(test_loader.dataset),
        "Test Loss": test_loss}
    for i in range(num_classes):
        log_dict["Test Accuracy - " + list(test_loader.dataset.categories.keys())[i]] = 100. * class_correct[i] / class_total[i]
    #print(log_dict)
    wandb.log(log_dict)
Code example #7
def compute_present_locations(args, corpus, cache_filename,
        model, segmenter, classnum, full_sample):
    # Phase 1.  Identify a set of locations where there are doorways.
    # Segment the image and find featuremap pixels that maximize the number
    # of doorway pixels under the featuremap pixel.
    if all(k in corpus for k in ['present_indices',
            'object_present_sample', 'object_present_location',
            'object_location_popularity', 'weighted_mean_present_feature']):
        return
    progress = default_progress()
    feature_shape = model.feature_shape[args.layer][2:]
    num_locations = numpy.prod(feature_shape).item()
    num_units = model.feature_shape[args.layer][1]
    with torch.no_grad():
        weighted_feature_sum = torch.zeros(num_units).cuda()
        object_presence_scores = []
        for [zbatch] in progress(
                torch.utils.data.DataLoader(TensorDataset(full_sample),
                batch_size=args.inference_batch_size, num_workers=10,
                pin_memory=True),
                desc="Object pool"):
            zbatch = zbatch.cuda()
            tensor_image = model(zbatch)
            segmented_image = segmenter.segment_batch(tensor_image,
                    downsample=2)
            mask = (segmented_image == classnum).max(1)[0]
            score = torch.nn.functional.adaptive_avg_pool2d(
                    mask.float(), feature_shape)
            object_presence_scores.append(score.cpu())
            feat = model.retained_layer(args.layer)
            weighted_feature_sum += (feat * score[:,None,:,:]).view(
                    feat.shape[0],feat.shape[1], -1).sum(2).sum(0)
        object_presence_at_feature = torch.cat(object_presence_scores)
        object_presence_at_image, object_location_in_image = (
                object_presence_at_feature.view(args.search_size, -1).max(1))
        best_presence_scores, best_presence_images = torch.sort(
                -object_presence_at_image)
        all_present_indices = torch.sort(
                best_presence_images[:(args.train_size+args.eval_size)])[0]
        corpus.present_indices = all_present_indices[:args.train_size]
        corpus.object_present_sample = full_sample[corpus.present_indices]
        corpus.object_present_location = object_location_in_image[
                corpus.present_indices]
        corpus.object_location_popularity = torch.bincount(
            corpus.object_present_location,
            minlength=num_locations)
        corpus.weighted_mean_present_feature = (weighted_feature_sum.cpu() / (
            1e-20 + object_presence_at_feature.view(-1).sum()))
        corpus.eval_present_indices = all_present_indices[-args.eval_size:]
        corpus.eval_present_sample = full_sample[corpus.eval_present_indices]
        corpus.eval_present_location = object_location_in_image[
                corpus.eval_present_indices]

    if cache_filename:
        numpy.savez(cache_filename, **corpus)
Code example #8
    def build_latent_distribution(self, alpha: int = 1):
        """Two passes:num_images_pixels
            1. we calculate the minimum latent value across our entire training
                distribution,
            2. we then add |min| to the latent values such that they are all >= 0 and
                then use torch.bincount to get discrete value counts
          -> which we then Laplace-smooth and convert into a CDF.
          If a code is in multiple parts, e.g. lateral FPN features, they are flattened and concatenated.
        """
        self.cdf = dict()
        self.min_val = torch.tensor(0.0).to(self.device).long()
        num_images = 0
        self.model.eval()
        if self.negative_codes:
            for batch in self.train_loader:
                with torch.no_grad():
                    out_dict = self.model(batch)
                    for code_feat in self.code_feats:
                        self.min_val = torch.min(
                            self.min_val,
                            out_dict[code_feat].long().min(),
                        )
                num_images += len(batch)
                if num_images > self.num_train_images:
                    break

        self.min_val = self.min_val.abs()
        self.bins = torch.tensor([0.0]).to(self.device).long()
        num_images = 0
        for batch in self.train_loader:
            with torch.no_grad():
                out_dict = self.model(batch)
                flat_codes = []
                for code_feat in self.code_feats:
                    flat_codes.append(out_dict[code_feat].long().flatten() +
                                      self.min_val)
                batch_bins = torch.bincount(torch.cat(flat_codes))
                if len(batch_bins) > len(self.bins):
                    batch_bins[:len(self.bins)] += self.bins
                    self.bins = batch_bins
                elif len(self.bins) > len(batch_bins):
                    self.bins[:len(batch_bins)] += batch_bins
                else:
                    self.bins += batch_bins
            num_images += len(batch)
            if num_images > self.num_train_images:
                break

        bins = self.bins.float()
        bins_smooth = ((bins + alpha) / (bins.sum() + len(bins) * alpha)
                       ).cpu()  # additive smooth counts using alpha
        self.cdf = rc.prob_to_cum_freq(bins_smooth,
                                       resolution=2 *
                                       len(bins_smooth))  # convert pdf -> cdf
        self.model.train()
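The additive (Laplace) smoothing near the end is independent of the model; a small hedged sketch with toy counts (the real code converts the smoothed probabilities to cumulative frequencies with rc.prob_to_cum_freq, while torch.cumsum below merely illustrates the pdf-to-cdf idea):

import torch

alpha = 1
bins = torch.tensor([5., 0., 3.])                        # toy bincount of latent values
pdf = (bins + alpha) / (bins.sum() + len(bins) * alpha)  # additive smoothing: no zero-probability bins
cdf = torch.cumsum(pdf, dim=0)                           # monotone, ends at 1.0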
Code example #9
def fuse_mask(n_mask, r_mask):
    base = torch.where(n_mask > 0,
                       torch.tensor(1).cuda(cuda_id),
                       torch.tensor(0).cuda(cuda_id)).float()
    areas = torch.max(n_mask)
    #for i in range(1,torch.max(r_mask).long()+1):
    i = 1
    shift = torch.where(r_mask == i,
                        torch.tensor(1).cuda(cuda_id),
                        torch.tensor(0).cuda(cuda_id)).float()
    non_overlap = torch.where(base - shift == -1,
                              torch.tensor(1).cuda(cuda_id),
                              torch.tensor(0).cuda(cuda_id)).float()
    overlap = shift - non_overlap
    if torch.sum(non_overlap) / torch.sum(shift) > 0.4:
        areas += 1
        n_mask = torch.where(non_overlap == 1, areas, n_mask)
        base = torch.where(n_mask > 0,
                           torch.tensor(1).cuda(cuda_id),
                           torch.tensor(0).cuda(cuda_id)).float()
        #print(areas)
    else:
        area_num = torch.argmax(
            torch.bincount(
                torch.where(
                    overlap.long() == 1, n_mask.long(),
                    torch.tensor(0).cuda(cuda_id)).view(-1))[1:]).float() + 1
        n_mask = torch.where(non_overlap == 1, area_num, n_mask)
        base = torch.where(n_mask > 0,
                           torch.tensor(1).cuda(cuda_id),
                           torch.tensor(0).cuda(cuda_id)).float()
        #print(areas)


#     areas_nums=torch.tensor(1).float().cuda(cuda_id)
#     for i in range(1,torch.max(n_mask).long()+1):
#         region=torch.where(n_mask==i,torch.tensor(1).cuda(cuda_id),torch.tensor(0).cuda(cuda_id)).float()
#         pixels=region.nonzero()
#         if pixels.shape[0]>0:
#             minx=torch.min(pixels[:,0])
#             maxx=torch.max(pixels[:,0])
#             miny=torch.min(pixels[:,1])
#             maxy=torch.max(pixels[:,1])
#             for i in range(1,torch.ceil((maxx-minx).float()/80).int()+1):
#                 for j in range(1,torch.ceil((maxy-miny).float()/80).int()+1):
#                     if torch.sum(region[minx+80*(i-1):minx+80*i,miny+80*(j-1):miny+80*j])>400:
#                         region[minx+80*(i-1):minx+80*i,miny+80*(j-1):miny+80*j]*=i*j
#             areas=torch.unique(region).sort()[0]
#             for i in range(1,len(areas)):
#                 region=torch.where(region==areas[i],-areas_nums,region)
#                 areas_nums+=1
#             n_mask=torch.where(n_mask==i,region,n_mask)
#     n_mask=-n_mask

    return n_mask
Code example #10
    def forward(self, data):
        joints = data.y
        joints_norepeat = []
        joints_batch = []
        joints_sample_1 = []
        joints_sample_2 = []
        pair_batch = []
        label = []
        accumulate_start_pair = 0
        for i in range(len(torch.unique(data.batch))):
            joints_sample = joints[data.batch == i, :]
            joints_sample = joints_sample[:data.num_joint[i], :]
            joints_norepeat.append(joints_sample)
            joints_batch.append(data.batch.new_full((data.num_joint[i],), i))
            pair_idx = data.pairs[accumulate_start_pair: accumulate_start_pair + data.num_pair[i]]
            accumulate_start_pair += data.num_pair[i]

            if np.random.uniform() > 0.5:
                joints_sample_1.append(joints_sample[pair_idx[:, 0].long()])
                joints_sample_2.append(joints_sample[pair_idx[:, 1].long()])
            else:
                joints_sample_1.append(joints_sample[pair_idx[:, 1].long()])
                joints_sample_2.append(joints_sample[pair_idx[:, 0].long()])
            pair_batch.append(data.batch.new_full((data.num_pair[i],), i))
            label.append(pair_idx[:, -1])
        joints_norepeat = torch.cat(joints_norepeat, dim=0)
        joints_batch = torch.cat(joints_batch).long()
        pair_batch = torch.cat(pair_batch).long()
        joints_sample_1 = torch.cat(joints_sample_1, dim=0)
        joints_sample_2 = torch.cat(joints_sample_2, dim=0)
        label = torch.cat(label, dim=0).unsqueeze(1)

        joints_pair = torch.cat((joints_sample_1, joints_sample_2, data.pairs[:, 2:4]), dim=1)
        pair_feature = self.expand_joint_feature(joints_pair)
        joint_feature = self.joint_encoder(joints_norepeat, joints_batch)
        joint_feature = torch.repeat_interleave(joint_feature, torch.bincount(pair_batch), dim=0)
        shape_feature = self.shape_encoder(data)
        shape_feature = torch.repeat_interleave(shape_feature, torch.bincount(pair_batch), dim=0)
        pair_feature = torch.cat((shape_feature, joint_feature, pair_feature), dim=1)

        pre_label = self.mix_transform(pair_feature)
        return pre_label, label
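The repeat_interleave(..., torch.bincount(pair_batch)) pattern above broadcasts one feature vector per sample out to every joint pair of that sample; a toy sketch with hypothetical tensors:

import torch

pair_batch = torch.tensor([0, 0, 1, 1, 1])   # 2 pairs belong to sample 0, 3 to sample 1
shape_feature = torch.randn(2, 4)            # one feature vector per sample
expanded = torch.repeat_interleave(shape_feature, torch.bincount(pair_batch), dim=0)
# expanded has shape (5, 4): rows 0-1 copy sample 0's vector, rows 2-4 copy sample 1's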
Code example #11
 def update(self, outputs, targets):
     with torch.no_grad():
         outputs = outputs.argmax(dim=1, keepdim=True)
         outputs, targets = outputs.cpu(), targets.cpu()
         for output, target in zip(outputs, targets):
             output, target = output.flatten(), target.flatten()
             mask = (target >= 0) * (target < self.n_classes)
             self.confusion_matrix += torch.bincount(
                 self.n_classes * target[mask] + output[mask],
                 minlength=self.n_classes**2).reshape(
                     self.n_classes, self.n_classes)
Code example #12
 def __init__(self, labels: Union[List[int], torch.Tensor], batch_size: int,
              num_identities: int, num_iterations: int):
     self.num_identities = num_identities
     self.num_iterations = num_iterations
     self.samples_per_id = batch_size // num_identities
     self.labels = torch.as_tensor(labels, dtype=torch.long)
     self.counts = torch.bincount(self.labels)
     self.label_indices = [
         torch.nonzero(self.labels == i).squeeze(1).tolist()
         for i in range(len(self.counts))
     ]
Code example #13
File: metric.py  Project: bencyq/Deep-Learning-Note
    def update(self, target, output):

        n = self.num_classes
        if self.mat is None:
            self.mat = torch.zeros((n, n),
                                   dtype=torch.int64,
                                   device=target.device)
        with torch.no_grad():
            k = (target >= 0) & (target < n)
            inds = n * target[k].to(torch.int64) + output[k]
            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
Code example #14
 def compute_mean_seg_in_images(batch_z, *args):
     img = model(batch_z.cuda())
     seg = segmodel.segment_batch(img, downsample=4)
     seg_area = seg.shape[2] * seg.shape[3]
     seg_counts = torch.bincount(
         (seg + (num_seglabels * torch.arange(
             seg.shape[0], dtype=seg.dtype,
             device=seg.device)[:, None, None, None])).view(-1),
         minlength=num_seglabels * seg.shape[0]).view(seg.shape[0], -1)
     seg_fracs = seg_counts.float() / seg_area
     return seg_fracs
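The offset trick above replaces a per-image loop with a single bincount call: each image's labels are shifted by image_index * num_seglabels so that their counts land in disjoint bins. A hedged toy sketch:

import torch

num_labels = 4
seg = torch.tensor([[0, 1, 1, 3],    # labels of image 0
                    [2, 2, 0, 0]])   # labels of image 1
offsets = num_labels * torch.arange(seg.shape[0])[:, None]
counts = torch.bincount((seg + offsets).view(-1),
                        minlength=num_labels * seg.shape[0]).view(seg.shape[0], -1)
# counts[0] == tensor([1, 2, 0, 1]) and counts[1] == tensor([2, 0, 2, 0])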
Code example #15
    def mode(self, list, dim):
        out_mode = []
        stack_list = tr.stack(list, dim)
        for i in range(stack_list.size(0)):
            sample_i = stack_list[i]
            mode = tr.bincount(sample_i)
            mode = tr.argmax(mode)
            out_mode.append(mode)
        out_mode = tr.stack(out_mode, 0)

        return out_mode
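For non-negative integer rows like these, the same per-row mode can also be obtained with the built-in torch.mode; this is a hedged side note, not part of the original snippet:

import torch as tr

stacked = tr.tensor([[1, 2, 2],
                     [0, 0, 3]])
values, _ = tr.mode(stacked, dim=1)   # tensor([2, 0]): the most frequent value per row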
Code example #16
File: model.py  Project: chungdz/GCE-GNN
def forward(model, i, data):
    # changed to use the PyTorch Geometric Data format
    items, targets, mask, batch, seq = data.x, data.y, data.sequence_mask, data.batch, data.sequence
    seq = seq.view(targets.shape[0], -1)
    mask = mask.view(targets.shape[0], -1)

    A = []
    # datas = data.to_data_list()
    # graphs = [to_networkx(d) for d in datas]
    # A = [nx.convert_matrix.to_pandas_adjacency(g).values for g in graphs]  # undirected graph: adj = in + out
    # A_out = [g for g in graphs]  # for a directed graph, the adj is just A_out

    # TODO: fix the high CPU usage problem
    # global graph
    gg = model.global_data
    gg_edge_index = gg.edge_index
    # run NeighborSampler directly on all nodes in the batch
    batch_nodes = seq.flatten()
    # batch_nodes = torch.unique(batch_nodes)  # take the unique nodes in the batch sessions
    # batch_nodes = batch_nodes[batch_nodes!=0]  # remove the padding node id
    # sample as a whole batch: find neighbors of the session-graph node ids in the global graph
    # subgraph_loaders = NeighborSampler(gg_edge_index, node_idx=batch_nodes, sizes=[-1], shuffle=False, num_workers=0, batch_size=batch_nodes.shape[0])  # all neighbors
    # FIXME: currently using all nodes
    subgraph_loaders = NeighborSampler(
        gg_edge_index,
        node_idx=batch_nodes,
        sizes=[10, 5],
        shuffle=False,
        num_workers=0,
        batch_size=batch_nodes.shape[0])  # 2 hop

    hidden, pad, g_h = model(items, A, data.edge_index,
                             subgraph_loaders)  # session graph node embeddings
    # map back to the original sequences
    sections = torch.bincount(batch).cpu().numpy()
    # split whole x back into graphs G_i
    hidden = torch.split(hidden, tuple(sections))

    # todo 增加不考慮padding的選項
    mask_true = True
    if mask_true:
        leng = mask.shape[1]  # session length after padding
        alias_inputs = data.alias_inputs
        s_len = data.sequence_len.cpu().numpy().tolist()
        alias_inputs = torch.split(alias_inputs, s_len)
        seq_hidden = torch.stack([
            get(pad, i, hidden, alias_inputs, leng)
            for i in torch.arange(len(alias_inputs)).long()
        ])
        g_h = g_h.view([len(hidden), leng, -1])
    else:
        seq_hidden = hidden
    seq_hidden += g_h
    return targets, model.compute_scores(seq_hidden, mask, mask_true)
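The sections = torch.bincount(batch) / torch.split(hidden, ...) pair above recovers the per-session node blocks from the flat node-embedding matrix; a toy sketch with hypothetical tensors:

import torch

batch = torch.tensor([0, 0, 0, 1, 1, 2])    # node-to-graph assignment for 3 session graphs
hidden = torch.randn(6, 8)                  # one embedding row per node
sections = torch.bincount(batch).tolist()   # [3, 2, 1] nodes per graph
per_graph = torch.split(hidden, sections)   # tuple of tensors with shapes (3, 8), (2, 8), (1, 8)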
Code example #17
File: losses.py  Project: AmmieQi/ksptrack
    def forward(self, z, targets):

        targets_ = targets.argmax(dim=1).to(z.device)

        bc = torch.bincount(targets_)
        freq_weights = bc.max() / bc.float()
        freq_smp_weights = freq_weights[targets.argmax(dim=1)]
        inputs = self.kappa * z

        loss = (freq_smp_weights * self.loss(inputs, targets_)).mean()
        return loss
Code example #18
 def update(self,
            val: Union[float, torch.Tensor, Sequence[torch.Tensor]] = None,
            **kwargs):
     y_pred, y_true = val
     self._num_samples += len(y_pred)
     y_pred = torch.argmax(y_pred, dim=1)
     matrix_indices = self.num_classes * y_true + y_pred
     m = torch.bincount(matrix_indices,
                        minlength=self.num_classes**2).reshape(
                            self.num_classes, self.num_classes)
     self.confusion_matrix += m.to(self.confusion_matrix)
Code example #19
File: infer.py  Project: nvtu/gamestory_mediaeval2019
def quantization(raw_feat):
    # Feature quantization
    raw_feat = torch.Tensor(raw_feat).type(torch.float32)
    c = torch.Tensor(cluster_centers[None, :, :]).type(torch.float32) 
    raw_feat = LazyTensor(raw_feat[:, None, :])
    c = LazyTensor(c)
    dist = ((raw_feat - c) ** 2).sum(-1)
    preds = dist.argmin(dim=1).long().view(-1)
    term_freq = torch.bincount(preds, minlength=num_clusters).type(torch.float32).numpy()
    normed_term_freq = term_freq / np.linalg.norm(term_freq)
    return normed_term_freq
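A plain-PyTorch sketch of the same bag-of-words quantization without the KeOps LazyTensor machinery (the shapes, cluster_centers and num_clusters below are stand-ins, not the project's actual values):

import torch

feats = torch.randn(100, 64)                         # raw local descriptors
centers = torch.randn(16, 64)                        # num_clusters x dim codebook
assign = torch.cdist(feats, centers).argmin(dim=1)   # nearest cluster per descriptor
tf = torch.bincount(assign, minlength=16).float()    # term frequencies
bow = tf / tf.norm()                                 # L2-normalized bag-of-words vector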
Code example #20
File: model.py  Project: jjedele/pytorch-isic2018
def confusion_matrix(pred: torch.Tensor,
                     target: torch.Tensor,
                     num_classes: int = None) -> torch.Tensor:
    num_classes = get_num_classes(pred, target, num_classes)

    unique_labels = target.view(-1) * num_classes + pred.view(-1)

    bins = torch.bincount(unique_labels, minlength=num_classes**2)
    cm = bins.reshape(num_classes, num_classes).squeeze().float()

    return cm
Code example #21
File: loader.py  Project: exabiome/deep-taxon
    def __call__(self, samples):
        l_idx = -1
        if isinstance(samples, tuple):
            samples = [samples]

        maxlen = 0
        for i, X, y, seq_id in samples:
            if maxlen < X.shape[l_idx]:
                maxlen = X.shape[l_idx]

        X_ret = list()
        y_ret = list()
        idx_ret = list()
        size_ret = list()
        seq_id_ret = list()
        for i, X, y, seq_id in samples:
            dif = maxlen - X.shape[l_idx]
            X_ = X
            if dif > 0:
                X_ = F.pad(X, (0, dif), value=self.padval)
            X_ret.append(X_)
            y_ret.append(y)
            size_ret.append(X.shape[l_idx])
            idx_ret.append(i)
            seq_id_ret.append(seq_id)

        # calculate tetranucleotide frequency
        chunks = torch.stack(X_ret)

        ## 1. hash 4-mers
        __seq = self.cmap[chunks]
        i4mers = torch.stack([__seq[:, 0:-3], __seq[:, 1:-2], __seq[:, 2:-1], __seq[:, 3:]], axis=2)
        mask = torch.any(i4mers < 0, axis=2)
        h4mers = i4mers.matmul(self.bases)       # hashed 4-mers
        h4mers[mask] = 256    # use index 256 (the 257th bin) to mark any 4-mers that had ambiguous nucleotides

        ## 2. count hashed 4-mers, i.e. count integers from 0 to 256 inclusive
        tnf = torch.zeros((32, 257), dtype=float)
        for i in range(tnf.shape[0]):
            counts = torch.bincount(h4mers[i], minlength=257)
            tnf[i] = counts/i4mers.shape[1]

        ## 3. merge canonical 4-mers
        canon_tnf = torch.zeros((32, 136))
        canon_tnf[:, :len(self.canonical)] = tnf[:, self.canonical] + tnf[:, self.noncanonical]
        canon_tnf[:, len(self.canonical):] = tnf[:, self.palindromes]

        X_ret = canon_tnf
        y_ret = torch.stack(y_ret)
        size_ret = torch.tensor(size_ret)
        idx_ret = torch.tensor(idx_ret)
        seq_id_ret = torch.tensor(seq_id_ret)

        return (idx_ret, X_ret, y_ret, size_ret, seq_id_ret)
Code example #22
File: utils.py  Project: urieli/dhSegment-torch
def batch_bincount(
    batch_input: Union[torch.Tensor, List[torch.Tensor]],
    n_batches: int,
    weights: Optional[torch.Tensor] = None,
    minlength=0,
) -> torch.Tensor:
    return torch.stack([
        torch.bincount(batch_input[batch_idx],
                       weights=weights,
                       minlength=minlength) for batch_idx in range(n_batches)
    ])
Code example #23
 def update(self, a, b):
     n = self.num_classes
     if self.mat is None:
         # create the confusion matrix
         self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
     with torch.no_grad():
         # find the indices of GT pixels that belong to a valid class
         k = (a >= 0) & (a < n)
         # count how many pixels with ground-truth class a[k] were predicted as class b[k] (a neat trick)
         inds = n * a[k].to(torch.int64) + b[k]
         self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)
Code example #24
def torch_compute_confusion_matrix(gts, preds):
    gts = torch.stack(gts, dim=0)
    preds = torch.stack(preds, dim=0)
    batch_confusion = torch.zeros((no_of_classes, no_of_classes),
                                  dtype=torch.int64)
    num_classes = no_of_classes

    def check_shape(y, y_pred):
        if y_pred.ndimension() < 2:
            raise ValueError(
                "y_pred must have shape (batch_size, num_categories, ...), "
                "but given {}".format(y_pred.shape))

        if y_pred.shape[1] != num_classes:
            raise ValueError(
                "y_pred ({}) does not have correct number of categories: {} vs {}"
                .format(y_pred.shape, y_pred.shape[1], num_classes))

        if not (y.ndimension() + 1 == y_pred.ndimension()):
            raise ValueError(
                "y_pred must have shape (batch_size, num_categories, ...) and y must have "
                "shape of (batch_size, ...), "
                "but given {} vs {}.".format(y.shape, y_pred.shape))

        y_shape = y.shape
        y_pred_shape = y_pred.shape

        if y.ndimension() + 1 == y_pred.ndimension():
            y_pred_shape = (y_pred_shape[0], ) + y_pred_shape[2:]

        if y_shape != y_pred_shape:
            raise ValueError("y and y_pred must have compatible shapes.")

    check_shape(gts, preds)

    # target is (batch_size, ...)
    preds = torch.argmax(preds, dim=1).flatten()
    gts = gts.flatten()

    target_mask = (gts >= 0) & (gts < num_classes)
    gts = gts[target_mask]
    preds = preds[target_mask]

    indices = num_classes * gts + preds
    m = torch.bincount(indices, minlength=num_classes**2).reshape(
        num_classes, num_classes)
    batch_confusion += m.to(batch_confusion)
    batch_confusion = batch_confusion.cpu().numpy().astype(np.float64)

    inter = np.diag(batch_confusion)
    union = np.sum(batch_confusion, 0) + np.sum(batch_confusion,
                                                1) - np.diag(batch_confusion)
    union = np.maximum(union, 1)
    return inter / union, batch_confusion
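The closing inter/union step is the standard per-class IoU read directly off the confusion matrix; a toy illustration with a made-up 2x2 matrix:

import numpy as np

cm = np.array([[8., 2.],
               [1., 4.]])                    # rows: ground truth, columns: prediction
inter = np.diag(cm)                          # true positives per class
union = cm.sum(0) + cm.sum(1) - np.diag(cm)  # TP + FP + FN per class
iou = inter / np.maximum(union, 1)           # array([8/11, 4/7])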
Code example #25
def _confusion_matrix_update(
    preds: torch.Tensor, target: torch.Tensor, num_classes: int, threshold: float = 0.5
) -> torch.Tensor:
    preds, target, mode = _input_format_classification(preds, target, threshold)
    if mode not in (DataType.BINARY, DataType.MULTILABEL):
        preds = preds.argmax(dim=1)
        target = target.argmax(dim=1)
    unique_mapping = (target.view(-1) * num_classes + preds.view(-1)).to(torch.long)
    bins = torch.bincount(unique_mapping, minlength=num_classes**2)
    confmat = bins.reshape(num_classes, num_classes)
    return confmat
Code example #26
File: helper.py  Project: flawnson/Generic_GNN
def loss_weights(dataset, agg_mask: np.ndarray,
                 device: torch.device) -> torch.tensor:
    """ These weights are designed to compensate for class imabalance in the dataset (negated effects if class has
        undergone oversampling or undersampling) """
    imb_wc = torch.bincount(
        dataset.ndata["y"][agg_mask], minlength=int(
            dataset.ndata["y"].max())).float().clamp(
                min=1e-10, max=1e10) / dataset.ndata["y"][agg_mask].shape[0]
    weights = (1 / imb_wc) / (sum(1 / imb_wc))

    return weights.to(device)
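A toy illustration of the weighting formula above, with made-up label counts (rarer classes receive proportionally larger weights, and the weights sum to one):

import torch

counts = torch.tensor([90., 9., 1.])          # class frequencies under the aggregation mask
imb_wc = counts / counts.sum()                # relative frequency per class
weights = (1 / imb_wc) / (1 / imb_wc).sum()   # approx. tensor([0.0099, 0.0990, 0.8911])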
Code example #27
File: spec_loss.py  Project: vmos1/cosmogan_pytorch
def f_torch_radial_profile(img, center=(None,None)):
    ''' Module to compute radial profile of a 2D image 
    Bincount causes issues with backprop, so not using this code
    '''
    
    y,x=torch.meshgrid(torch.arange(0,img.shape[0]),torch.arange(0,img.shape[1])) # Get a grid of x and y values
    if center[0]==None and center[1]==None:
        center = torch.Tensor([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0]) # compute centers

    # get radial values of every pair of points
    r = torch.sqrt((x - center[0])**2 + (y - center[1])**2)
    r= r.int()
    
#     print(r.shape,img.shape)
    # Compute histogram of r values
    tbin=torch.bincount(torch.reshape(r,(-1,)),weights=torch.reshape(img,(-1,)).type(torch.DoubleTensor))
    nr = torch.bincount(torch.reshape(r,(-1,)))
    radialprofile = tbin / nr
    
    return radialprofile[1:-1]
Code example #28
 def get_cc_partition(self) -> Partition:
     labels = skimage.measure.label(self.index_matrix > 0,
                                    connectivity=2,
                                    background=0,
                                    return_num=False)
     membership = torch.tensor(
         labels, dtype=torch.long,
         device=self.device)[self.i_coordinate_fg_pixel,
                             self.j_coordinate_fg_pixel]
     return Partition(membership=membership,
                      sizes=torch.bincount(membership))
Code example #29
def class_imbalance_sampler(targets, segmentation_threshold):
    if len(targets.shape) > 1:  # if posed as segmentation task
        targets = targets.sum(axis=1) / targets.shape[1]
        targets = targets > segmentation_threshold

    targets = tensor(targets).long().squeeze()
    class_count = torch.bincount(targets)
    weighting = tensor(1.) / class_count.float()
    weights = weighting[targets]
    sampler = WeightedRandomSampler(weights, len(targets))
    return sampler
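A hedged usage sketch (train_dataset and targets are placeholders): the returned sampler plugs straight into a DataLoader so that minority-class samples are drawn more often.

from torch.utils.data import DataLoader

sampler = class_imbalance_sampler(targets, segmentation_threshold=0.5)
loader = DataLoader(train_dataset, batch_size=32, sampler=sampler)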
Code example #30
    def _generate_matrix(self, gt, pr):
        target_mask = (gt >= 0) & (gt < self.num_classes)
        gt = gt[target_mask]
        pr = pr[target_mask]
        gt = gt.long()

        indices = self.num_classes * gt + pr
        conf = torch.bincount(indices, minlength=self.num_classes**2)
        conf = conf.reshape(self.num_classes, self.num_classes)

        return conf.float()