Code Example #1
File: scaler.py Project: zhangxinaaaa/DESED_task
    def forward(self, tensor):
        if self.statistic == "dataset":
            assert hasattr(self, "mean") and hasattr(
                self, "mean_squared"
            ), "TorchScaler should be fit before used if statistics=dataset"
            assert tensor.ndim == self.mean.ndim, "Pre-computed statistics must match the input tensor's dimensionality"
            if self.normtype == "mean":
                return tensor - self.mean
            elif self.normtype == "standard":
                std = torch.sqrt(self.mean_squared - self.mean ** 2)
                return (tensor - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError

        else:
            if self.normtype == "mean":
                return tensor - torch.mean(tensor, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (tensor - torch.mean(tensor, self.dims, keepdim=True)) / (
                    torch.std(tensor, self.dims, keepdim=True) + self.eps
                )
            elif self.normtype == "minmax":
                return (tensor - torch.amin(tensor, dim=self.dims, keepdim=True)) / (
                    torch.amax(tensor, dim=self.dims, keepdim=True)
                    - torch.amin(tensor, dim=self.dims, keepdim=True)
                    + self.eps
                )
            else:
                raise NotImplementedError
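A minimal standalone sketch (not taken from the project above) of the same per-instance min-max normalization, assuming a (batch, channels, time) input and reduction over the last two dimensions:

import torch

x = torch.randn(4, 64, 100)          # hypothetical (batch, channels, time) input
dims, eps = (1, 2), 1e-8
x_min = torch.amin(x, dim=dims, keepdim=True)
x_max = torch.amax(x, dim=dims, keepdim=True)
x_scaled = (x - x_min) / (x_max - x_min + eps)
print(x_scaled.amin().item(), x_scaled.amax().item())  # roughly 0 and 1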
Code Example #2
def get_asymmetric_3d_iou(RT_1, RT_2, scales_1, scales_2):

    noc_cube_1 = get_3d_bbox(scales_1, 0)
    bbox_3d_1 = transform_3d_camera_coords_to_3d_world_coords(noc_cube_1, RT_1)

    noc_cube_2 = get_3d_bbox(scales_2, 0)
    bbox_3d_2 = transform_3d_camera_coords_to_3d_world_coords(noc_cube_2, RT_2)

    bbox_1_max = torch.amax(bbox_3d_1, dim=0)
    bbox_1_min = torch.amin(bbox_3d_1, dim=0)
    bbox_2_max = torch.amax(bbox_3d_2, dim=0)
    bbox_2_min = torch.amin(bbox_3d_2, dim=0)

    overlap_min = torch.maximum(bbox_1_min, bbox_2_min)
    overlap_max = torch.minimum(bbox_1_max, bbox_2_max)

    # intersections and union
    if torch.amin(overlap_max - overlap_min) < 0:
        intersections = 0
    else:
        intersections = torch.prod(overlap_max - overlap_min)

    union = torch.prod(bbox_1_max -
                       bbox_1_min) + torch.prod(bbox_2_max -
                                                bbox_2_min) - intersections
    iou_3d = intersections / union

    return iou_3d
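A hedged sketch of the axis-aligned core of the function above, assuming the two boxes are already given as (8, 3) corner tensors (the get_3d_bbox / transform_3d_camera_coords_to_3d_world_coords helpers are not reproduced here); a clamp replaces the explicit negative-overlap check:

import torch

corners_1 = torch.rand(8, 3)                 # hypothetical transformed box corners
corners_2 = torch.rand(8, 3) + 0.2

min_1, max_1 = torch.amin(corners_1, dim=0), torch.amax(corners_1, dim=0)
min_2, max_2 = torch.amin(corners_2, dim=0), torch.amax(corners_2, dim=0)

overlap = torch.clamp(torch.minimum(max_1, max_2) - torch.maximum(min_1, min_2), min=0)
intersection = torch.prod(overlap)
union = torch.prod(max_1 - min_1) + torch.prod(max_2 - min_2) - intersection
iou_3d = intersection / union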
Code Example #3
    def forward(self, input):
        if self.statistic == "dataset":
            if self.normtype == "mean":
                return input - self.mean
            elif self.normtype == "standard":
                std = torch.sqrt(self.mean_squared - self.mean**2)
                return (input - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError

        elif self.statistic == "instance":
            if self.normtype == "mean":
                return input - torch.mean(input, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (input - torch.mean(input, self.dims, keepdim=True)) / (
                    torch.std(input, self.dims, keepdim=True) + self.eps)
            elif self.normtype == "minmax":
                return (input - torch.amin(input, dim=self.dims, keepdim=True)
                        ) / (torch.amax(input, dim=self.dims, keepdim=True) -
                             torch.amin(input, dim=self.dims, keepdim=True) +
                             self.eps)
            else:
                raise NotImplementedError

        else:
            raise NotImplementedError
Code Example #4
def sinkhorn(M, r=None, c=None, gamma=1.0, eps=1.0e-6, maxiters=1000, logspace=False):
    """
    PyTorch function for entropy regularized optimal transport. Assumes batched inputs as follows:
        M:  (B,H,W) tensor
        r:  (B,H) tensor, (1,H) tensor or None for constant uniform vector 1/H
        c:  (B,W) tensor, (1,W) tensor or None for constant uniform vector 1/W

    You can backpropagate through this function in O(TBWH) time, where T is the number of iterations taken to converge.
    """

    B, H, W = M.shape
    assert r is None or r.shape == (B, H) or r.shape == (1, H)
    assert c is None or c.shape == (B, W) or c.shape == (1, W)
    assert not logspace or torch.all(M > 0.0)

    r = 1.0 / H if r is None else r.unsqueeze(dim=2)
    c = 1.0 / W if c is None else c.unsqueeze(dim=1)

    if logspace:
        P = torch.pow(M, gamma)
    else:
        P = torch.exp(-1.0 * gamma * (M - torch.amin(M, 2, keepdim=True)))

    for i in range(maxiters):
        alpha = torch.sum(P, 2)
        # Perform division first for numerical stability
        P = P / alpha.view(B, H, 1) * r

        beta = torch.sum(P, 1)
        if torch.max(torch.abs(beta - c)) <= eps:
            break
        P = P / beta.view(B, 1, W) * c

    return P
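A minimal usage sketch for the sinkhorn() function above (values chosen arbitrarily), checking that the transport plan's marginals match the uniform defaults 1/H and 1/W:

import torch

M = torch.rand(2, 5, 7)                   # batched cost matrix of shape (B, H, W)
P = sinkhorn(M, gamma=10.0, eps=1e-6, maxiters=1000)
print(P.sum(dim=2))                       # each row sums to ~1/H
print(P.sum(dim=1))                       # each column sums to ~1/W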
Code Example #5
def _sinkhorn_inline(M, r=None, c=None, gamma=1.0, eps=1.0e-6, maxiters=1000, logspace=False):
    """As above but with inline calculations for when autograd is not needed."""

    B, H, W = M.shape
    assert r is None or r.shape == (B, H) or r.shape == (1, H)
    assert c is None or c.shape == (B, W) or c.shape == (1, W)
    assert not logspace or torch.all(M > 0.0)

    r = 1.0 / H if r is None else r.unsqueeze(dim=2)
    c = 1.0 / W if c is None else c.unsqueeze(dim=1)

    if logspace:
        P = torch.pow(M, gamma)
    else:
        P = torch.exp(-1.0 * gamma * (M - torch.amin(M, 2, keepdim=True)))

    for i in range(maxiters):
        alpha = torch.sum(P, 2)
        # Perform division first for numerical stability
        P /= alpha.view(B, H, 1)
        P *= r

        beta = torch.sum(P, 1)
        if torch.max(torch.abs(beta - c)) <= eps:
            break
        P /= beta.view(B, 1, W)
        P *= c

    return P
Code Example #6
File: __init__.py Project: zacker150/pytorch
def amin(input: Tensor,
         dim: DimOrDims = None,
         *,
         keepdim: Optional[bool] = False,
         dtype: Optional[DType] = None,
         mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    if input.layout == torch.strided:
        if mask is None:
            mask_input = input
        else:
            identity = input.new_full([], _reduction_identity('amin', input))
            mask_input = torch.where(mask, input, identity)
        dim_ = _canonical_dim(dim, mask_input.ndim)
        return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    else:
        raise ValueError(
            f'masked amin expects strided tensor (got {input.layout} tensor)')
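For illustration, the strided branch above amounts to masking with the reduction identity (+inf for a min) by hand; a small sketch, not part of the PyTorch source:

import torch

x = torch.tensor([[1.0, -2.0, 3.0],
                  [0.5,  4.0, -1.0]])
mask = torch.tensor([[True, False, True],
                     [True, True, False]])
masked = torch.where(mask, x, torch.tensor(float("inf")))  # identity for amin
print(torch.amin(masked, dim=1))                           # tensor([1.0000, 0.5000])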
Code Example #7
File: ppo.py Project: francoisgergaud/ai-traineree
    def compute_policy_loss(self, samples):
        states, actions, old_log_probs, _, advantages = samples

        actor_est = self.actor(states)
        dist = self.policy(actor_est)

        entropy = dist.entropy()
        new_log_probs = self.policy.log_prob(dist, actions).view(-1, 1)
        assert new_log_probs.shape == old_log_probs.shape

        r_theta = (new_log_probs - old_log_probs).exp()
        r_theta_clip = torch.clamp(r_theta, 1.0 - self.ppo_ratio_clip,
                                   1.0 + self.ppo_ratio_clip)
        assert r_theta.shape == r_theta_clip.shape

        # KL(P||Q) = E_P[log(P/Q)] = sum_P P*log(P/Q), approximated below by the sample mean of log(P) - log(Q)
        approx_kl_div = (old_log_probs - new_log_probs).mean().item()
        if self.using_kl_div:
            # Ratio threshold for updates is 1.75 (although it should be configurable)
            policy_loss = -torch.mean(
                r_theta * advantages) + self.kl_beta * approx_kl_div
        else:
            joint_theta_adv = torch.stack(
                (r_theta * advantages, r_theta_clip * advantages))
            assert joint_theta_adv.shape[0] == 2
            policy_loss = -torch.amin(joint_theta_adv, dim=0).mean()
        entropy_loss = -self.entropy_weight * entropy.mean()

        loss = policy_loss + entropy_loss
        self._metrics['policy/kl_div'] = approx_kl_div
        self._metrics['policy/policy_ratio'] = float(r_theta.mean())
        self._metrics['policy/policy_ratio_clip_mean'] = float(
            r_theta_clip.mean())
        return loss, approx_kl_div
Code Example #8
    def forward(self, x):
        # check dims
        if x.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(
                x.dim()))
        if self.training:
            # batch stats
            x_min = torch.amin(x, dim=(0, 1))
            x_max = torch.amax(x, dim=(0, 1))

            if self.first:
                self.max = x_max
                self.min = x_min
                self.first = False

            else:
                # update min/max, masking the correct entries
                max_mask = torch.greater(x_max, self.max)
                self.max = (max_mask * x_max) + \
                    (torch.logical_not(max_mask) * self.max)

                min_mask = torch.less(x_min, self.min)
                self.min = (min_mask * x_min) + \
                    (torch.logical_not(min_mask) * self.min)

            self.max_min = self.max - self.min + 1e-13

        # scale batch
        x = (x - self.min) / self.max_min

        return x
Code Example #9
File: math_ops.py Project: malfet/pytorch
 def reduction_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     return (
         torch.argmax(a),
         torch.argmin(a),
         torch.amax(a),
         torch.amin(a),
         torch.aminmax(a),
         torch.all(a),
         torch.any(a),
         torch.max(a),
         torch.min(a),
         torch.dist(a, b),
         torch.logsumexp(a, 0),
         torch.mean(a),
         torch.nanmean(a),
         torch.median(a),
         torch.nanmedian(a),
         torch.mode(a),
         torch.norm(a),
         torch.nansum(a),
         torch.prod(a),
         torch.quantile(a, torch.tensor([0.25, 0.5, 0.75])),
         torch.nanquantile(a, torch.tensor([0.25, 0.5, 0.75])),
         torch.std(a),
         torch.std_mean(a),
         torch.sum(a),
         torch.unique(a),
         torch.unique_consecutive(a),
         torch.var(a),
         torch.var_mean(a),
         torch.count_nonzero(a),
     )
Code Example #10
File: __init__.py Project: paolodedios/pytorch
def amin(input: Tensor,
         dim: DimOrDims = None,
         *,
         keepdim: Optional[bool] = False,
         dtype: Optional[DType] = None,
         mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(amin, input, mask)
    if input.layout == torch.strided:
        dim_ = _canonical_dim(dim, mask_input.ndim)
        return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    else:
        raise ValueError(
            f'masked amin expects strided tensor (got {input.layout} tensor)')
Code Example #11
File: __init__.py Project: sujoysaraswati/pytorch
def amin(input: Tensor,
         dim: DimOrDims = None,
         *,
         keepdim: Optional[bool] = False,
         dtype: Optional[DType] = None,
         mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype

    mask_input = _combine_input_and_mask(amin, input, mask)
    dim_ = _canonical_dim(dim, mask_input.ndim)
    if input.layout == torch.strided:
        return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    elif input.layout == torch.sparse_coo:
        if mask is None:
            # See comment in the sparse_csr branch of prod, a similar issue arises here
            # where unspecified elements along a dimension may need to be reduced with the result
            raise ValueError('masked amin expects explicit mask for sparse_coo tensor input')
        return _sparse_coo_scatter_reduction_helper(torch.amin, mask_input, dim_, bool(keepdim), dtype)
    else:
        raise ValueError(f'masked amin expects strided or sparse_coo tensor (got {input.layout} tensor)')
Code Example #12
    def get_loss_proj(self, pred, gt, loss_type='bce', w=1., min_dist_loss=None,
                      dist_mat=None, args=None, grid_h=64, grid_w=64):

        if loss_type == 'bce':
            # print ('\nBCE Logits Loss\n')
            loss_function = torch.nn.BCEWithLogitsLoss(weight = None, reduction='none')
            loss = loss_function(pred, gt)
        """
        if loss == 'weighted_bce':
            print '\nWeighted BCE Logits Loss\n'
            loss = tf.nn.weighted_cross_entropy_with_logits(targets=gt, logits=pred, 
                            pos_weight=0.5)
        if loss == 'l2_sq':
            print '\nL2 Squared Loss\n'
            loss = (pred-gt)**2
    
        if loss == 'l1':
            print '\nL1 Loss\n'
            loss = abs(pred-gt)
        """
        if loss_type == 'bce_prob':
            # print('\nBCE Loss\n')
            epsilon = 1e-8
            loss = -gt*torch.log(pred+epsilon)*w - (1-gt)*torch.log(torch.abs(1-pred-epsilon))
    
        if min_dist_loss is not None:
            # Affinity loss - essentially 2D chamfer distance between GT and 
            # predicted masks
            dist_mat += 1.
            gt_white = torch.unsqueeze(torch.unsqueeze(gt, 3), 3)
            gt_white = gt_white.repeat(1, 1, 1, 64, 64)
            
            pred_white = torch.unsqueeze(torch.unsqueeze(pred, 3), 3)
            pred_white = pred_white.repeat(1, 1, 1, 64, 64)
            
            pred_mask = (pred_white) + ((1.-pred_white))*1e6*torch.ones_like(pred_white)
            dist_masked_inv = gt_white * dist_mat * (pred_mask)
            
            gt_white_th = gt_white + (1.-gt_white)*1e6*torch.ones_like(gt_white)
            dist_masked = gt_white_th * dist_mat * pred_white
            
            min_dist = torch.amin(dist_masked, dim=(3,4))
            min_dist_inv = torch.amin(dist_masked_inv, dim=(3,4))
    
        return loss, min_dist, min_dist_inv
Code Example #13
def build_graph(boxes, labels, thresh_size=0.5):
    minx = torch.amin(boxes[:,0])
    miny = torch.amin(boxes[:,1])
    maxx = torch.amax(boxes[:,2])
    maxy = torch.amax(boxes[:,3])
    avg_dim = (maxx - minx + maxy - miny) / 2
    thresh = thresh_size * avg_dim

    centres = torch.tensor([[(x1 + x2) / 2, (y1 + y2) / 2] for x1, y1, x2, y2 in boxes])
    dists = torch.cdist(centres[None], centres[None])[0]

    idxrange = torch.arange(len(centres))
    aa, bb = torch.meshgrid(idxrange, idxrange)
    dir_vecs = (centres[bb] - centres[aa]) / dists.reshape(len(centres), len(centres), 1) # Matrix of normalized direction vectors from aa to bb
    dirs = torch.acos(dir_vecs[:,:,0].clamp(-1, 1)) # Matrix of angles, clamping necessary due to float inaccuracy
    over_180 = dir_vecs[:,:,1] < 0 # y-component < 0 --> true angle is 360 - acos(x)
    dirs[over_180] = 2 * pi - dirs[over_180]

    dir_matrices = {
        'E': (dirs > 15 * pi / 8) | (dirs <= pi / 8),
        **{d: (dirs > (1 + 2 * i) * pi / 8) & (dirs <= (1 + 2 * (i + 1)) * pi / 8) for i, d in enumerate(CARDINALS[1:])}
    }

    g = nx.DiGraph()
    g.add_nodes_from([(i.item(), {'label': labels[i]}) for i in idxrange])
    sorted_dist, sort_idx = dists.sort(dim=1)
    for i in idxrange:
        i = i.item()
        not_found = set(CARDINALS)
        for neigh in g[i]:
            not_found.remove(g[i][neigh]['dir'])
        for d, j in zip(sorted_dist[i], sort_idx[i]):
            if d > thresh or not len(not_found): # Iterate until all cardinal dirs have been found or until we are over the dist threshold
                break
            j = j.item()
            if i == j: continue # No self-loops allowed
            for dir in not_found:
                if _check_dir(i, j, dir, dir_matrices, g, d):
                    not_found.remove(dir)
                    break

    return g
Code Example #14
File: image_utils.py Project: dgketchum/itype
def get_transforms(in_, out_norm):
    """Run through unnormalized training data for global mean and std."""
    train_ds = ITypeDataset(in_, transforms=None)
    dl = DataLoader(
        train_ds,
        batch_size=4,
        num_workers=4,
        pin_memory=True,
        collate_fn=None)

    nimages = 0
    mean = 0.
    std = 0.
    first = True
    for i, (x, _) in enumerate(dl):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3])
        if first:
            max_ = torch.amax(x, dim=(2, 0))
            min_ = torch.amin(x, dim=(2, 0))
            first = False

        amax = torch.amax(x, dim=(2, 0))
        amin = torch.amin(x, dim=(2, 0))
        max_ = torch.max(amax, max_)
        min_ = torch.min(amin, min_)
        nimages += x.size(0)
        mean += x.mean(2).sum(0)
        std += x.std(2).sum(0)

    print('channel-wise min: {}'.format(list(min_.numpy())))
    print('channel-wise max: {}'.format(list(max_.numpy())))

    mean /= nimages
    std /= nimages

    print((mean, std))
    pkl_name = os.path.join(out_norm, 'meanstd.pkl')
    with open(pkl_name, 'wb') as handle:
        pkl.dump((mean, std), handle, protocol=pkl.HIGHEST_PROTOCOL)
Code Example #15
    def forward(self, f, g):

        mapped_f = self.map_f(f)
        mapped_g = self.map_g(g)

        attention = self.attend(mapped_f + mapped_g)
        # Shift each sample to have a minimum of zero
        shifted_att = attention - torch.amin(
            attention, dim=[1, 2, 3, 4], keepdim=True)
        # Scale samples so that each one sums to 1
        scaled_att = shifted_att / torch.sum(
            shifted_att, dim=[1, 2, 3, 4], keepdim=True)

        return scaled_att * mapped_f
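The same shift-by-minimum and normalize-to-sum-one pattern on a generic 5-D attention map, as a standalone sketch (shapes invented for illustration):

import torch

att = torch.randn(2, 1, 4, 8, 8)
shifted = att - torch.amin(att, dim=[1, 2, 3, 4], keepdim=True)
scaled = shifted / torch.sum(shifted, dim=[1, 2, 3, 4], keepdim=True)
print(scaled.sum(dim=[1, 2, 3, 4]))   # ~1 for every sample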
Code Example #16
    def __init__(self, *args, **kwargs):
        # `args` can be a variable number of arrays; we flatten them and store
        # them as a single 2-D array `xi` of shape (n_args-1, array_size),
        # plus a 1-D array `di` for the values.
        # All arrays must have the same number of elements
        self.xi = torch.stack(
            [torch.as_tensor(a, dtype=torch.float32).flatten()
             for a in args[:-1]])
        self.N = self.xi.shape[-1]
        self.device = self.xi.device

        self.mode = kwargs.pop('mode', '1-D')

        if self.mode == '1-D':
            self.di = torch.as_tensor(args[-1]).flatten()
            self._target_dim = 1
        elif self.mode == 'N-D':
            self.di = torch.as_tensor(args[-1])
            self._target_dim = self.di.shape[-1]
        else:
            raise ValueError("Mode has to be 1-D or N-D.")

        if self.xi.device != self.di.device:
            raise ValueError("All arrays must be on same device.")

        if not all([x.numel() == self.di.shape[0] for x in self.xi]):
            raise ValueError("All arrays must be equal length.")

        self.norm = kwargs.pop('norm', 2)
        self.epsilon = kwargs.pop('epsilon', None)
        if self.epsilon is None:
            # default epsilon is the "the average distance between nodes" based
            # on a bounding hypercube
            ximax = torch.amax(self.xi, axis=1)
            ximin = torch.amin(self.xi, axis=1)
            edges = ximax - ximin
            edges = edges[torch.nonzero(edges)]
            self.epsilon = (torch.prod(edges) / self.N) ** (1.0 / edges.numel())

        self.smooth = kwargs.pop('smooth', 0.0)
        self.function = kwargs.pop('function', 'multiquadric')

        # attach anything left in kwargs to self for use by any user-callable
        # function or to save on the object returned.
        for item, value in kwargs.items():
            setattr(self, item, value)

        self._compute_weights()
Code Example #17
File: model.py Project: apardyl/ProtoPNet
    def forward_(self, x):
        # x = x.squeeze(0)
        distances = self.prototype_distances(x)
        self.distances = distances
        '''
        we cannot refactor the lines below for similarity scores
        because we need to return min_distances
        '''
        # global min pooling
        min_distances = -F.max_pool2d(
            -distances, kernel_size=(distances.size()[2], distances.size()[3]))
        min_distances = min_distances.view(-1, self.num_prototypes)
        prototype_activations = self.distance_2_similarity(min_distances)

        #prototype_activations = self.dropout(prototype_activations)
        A = torch.ones(
            (1,
             prototype_activations.shape[0])) / prototype_activations.shape[0]
        out_c = None
        if self.mil_pooling == 'gated_attention':
            A_V = self.attention_V(prototype_activations)  # NxD
            A_U = self.attention_U(prototype_activations)  # NxD
            A = self.attention_weights(
                A_V * A_U)  # element wise multiplication # NxK
            A = torch.transpose(A, 1, 0)  # KxN
            A = F.softmax(A, dim=1)  # softmax over N
            M = torch.mm(A, prototype_activations)  # KxL
        elif self.mil_pooling == 'average':
            M = torch.mean(prototype_activations, dim=0, keepdim=True)
        elif self.mil_pooling == 'max':
            M = torch.amax(prototype_activations, dim=0, keepdim=True)
        elif self.mil_pooling == 'min':
            M = torch.amin(prototype_activations, dim=0, keepdim=True)
        elif self.mil_pooling == 'loss_attention':
            M, out_c, A = self.loss_attention(prototype_activations,
                                              self.last_layer.weight,
                                              self.last_layer.bias)
            M = M.mean(0, keepdim=True)
        else:
            raise NotImplementedError()

        logits = self.last_layer(M)

        self.out_c = out_c
        self.A = A

        return logits, min_distances, A, prototype_activations
Code Example #18
File: utils.py Project: penghouwen/nni
def get_min_max_value(x, quant_type, quant_scheme):

    target_dim = get_target_dim(quant_type, quant_scheme)
    if target_dim is None:
        return torch.min(x), torch.max(x)

    indices = list(range(len(x.shape)))
    assert target_dim < len(indices), "target_dim needs to be less than the number of dim of the tensor"
    del indices[target_dim]

    if TORCH_VERSION > (1, 6):
        min_val = torch.amin(x, indices, keepdims=True)
        max_val = torch.amax(x, indices, keepdims=True)
    else:
        min_val = max_val = x
        for ind in indices:
            min_val = torch.min(min_val, dim=ind, keepdim=True)[0]
            max_val = torch.max(max_val, dim=ind, keepdim=True)[0]
    return min_val, max_val
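The newer-PyTorch branch above boils down to reducing over every dimension except the target one; a short sketch under the assumption of per-channel (dim 1) statistics for an (N, C, H, W) tensor:

import torch

x = torch.randn(8, 3, 32, 32)
target_dim = 1                                            # per-channel statistics
reduce_dims = [d for d in range(x.dim()) if d != target_dim]
min_val = torch.amin(x, dim=reduce_dims, keepdim=True)    # shape (1, 3, 1, 1)
max_val = torch.amax(x, dim=reduce_dims, keepdim=True)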
Code Example #19
File: model.py Project: HKUST-KnowComp/VWS-PR
    def get_cos_min_max_similarity(self, senti_emb, negation):
        args = self.args
        batch_size = senti_emb.size(0)
        num_senti = senti_emb.size(1)

        negation_ = torch.unsqueeze(negation,
                                    dim=2).repeat(1, 1, senti_emb.size(-1))
        senti_emb = senti_emb * negation_

        senti_emb = torch.reshape(senti_emb, (-1, senti_emb.size(-1)))
        senti_emb = F.normalize(senti_emb, dim=1, p=2)

        bn_x_bn_cos_sim = torch.matmul(senti_emb, senti_emb.permute(1, 0))

        bn_x_bn_cos_sim = self.block_wise_operator(bn_x_bn_cos_sim, batch_size,
                                                   num_senti)

        b_x_b_max = torch.amax(bn_x_bn_cos_sim, dim=[2, 3])
        b_x_b_min = torch.amin(bn_x_bn_cos_sim, dim=[2, 3])

        max_mask = torch.gt(b_x_b_max, args.gamma_positive)
        min_mask = torch.lt(b_x_b_min, args.gamma_negative)

        and_mask = torch.logical_and(max_mask, min_mask).type(torch.float32)
        and_mask_complement = torch.tensor([1.0]).to(args.device) - and_mask

        max_mask = max_mask.type(torch.float32)
        min_mask = min_mask.type(torch.float32)

        location = torch.eye(batch_size).type('torch.BoolTensor').to(
            args.device)
        max_mask.masked_fill_(location, 0)
        min_mask.masked_fill_(location, 0)

        and_mask.masked_fill_(location, 0)
        and_mask_complement.masked_fill_(location, 0)

        similarity = b_x_b_max * and_mask_complement * max_mask + b_x_b_min * and_mask_complement * min_mask + and_mask

        return similarity, (and_mask_complement * max_mask +
                            and_mask_complement * min_mask + and_mask)
Code Example #20
    def add(self, output, target):
        output = self.relu(output)
        output = resize_for_tensors(output, (target.size(-2), target.size(-1)))
        output -= torch.amin(output, dim=(1, 2, 3), keepdim=True)
        output /= (torch.amax(output, dim=(1, 2, 3), keepdim=True))
        # output -= output.min(1, keepdim=True)[0]
        # print(output)
        # output /= output.max(1, keepdim=True)[0]

        output = torch.amax(output, dim=1).squeeze()

        target = (target > 0).type(torch.IntTensor)
        target = target.view(-1)
        valid_idx = target != self.ignore_value

        target[~valid_idx] = 0
        for st, thresh in enumerate(self.thresh_range):
            cur_output = (output > thresh).type(torch.IntTensor).view(-1)
            for i, j in itertools.product(torch.unique(target),
                                          torch.unique(cur_output)):
                self.conf_matrix_lst[st][i, j] += torch.sum(
                    (target[valid_idx] == i) & (cur_output[valid_idx] == j))
Code Example #21
def distance_to_reference_trajectory(pred_centroid: torch.Tensor,
                                     ref_traj: torch.Tensor) -> torch.Tensor:
    """ Computes the distance from the predicted centroid to the closest waypoint in the reference trajectory.

    :param pred_centroid: predicted centroid tensor, size: [batch_size, 2]
    :type pred_centroid: torch.Tensor, float
    :param ref_traj: reference trajectory tensor, size: [batch_size, num_timestamps, 2]
    :type ref_traj: torch.Tensor, float
    :return: closest distance between the predicted centroid and the reference trajectory, size: [batch_size,]
    :rtype: torch.Tensor, float
    """
    # [batch_size, 2]
    assert pred_centroid.dim() == 2
    # [batch_size, num_timestamps, 2]
    assert ref_traj.dim() == 3

    # [batch_size,]
    euclidean_distance = torch.linalg.norm(pred_centroid.unsqueeze(1) -
                                           ref_traj,
                                           ord=2,
                                           dim=-1)
    return torch.amin(euclidean_distance, dim=1)
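A minimal usage sketch for the function above, with arbitrary shapes:

import torch

pred_centroid = torch.randn(4, 2)      # [batch_size, 2]
ref_traj = torch.randn(4, 50, 2)       # [batch_size, num_timestamps, 2]
d = distance_to_reference_trajectory(pred_centroid, ref_traj)
print(d.shape)                         # torch.Size([4])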
Code Example #22
File: vsm.py Project: agupta24/cs224u-1
def min_pooling(hidden_states):
    """
    Get the min values along `axis=1` of a Tensor.

    Parameters
    ----------
    hidden_states : torch.Tensor, shape `(k, m, n)`
        Where `k` is the number of examples, `m` is the number of vectors
        for each example, and `n` is dimensionality of each vector.

    Raises
    ------
    ValueError
        If `hidden_states` does not have 3 dimensions.

    Returns
    -------
    torch.Tensor of dimension `(k, n)`.

    """
    _check_pooling_dimensionality(hidden_states)
    return torch.amin(hidden_states, axis=1)
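Usage sketch, assuming a (k, m, n) tensor of hidden states; max- or mean-pooling would simply swap in torch.amax or torch.mean:

import torch

hidden_states = torch.randn(3, 5, 8)        # (examples, vectors, vector dim)
pooled = torch.amin(hidden_states, dim=1)   # shape (3, 8)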
Code Example #23
def SSIM(Xorig, Xrecon, length=9, width=None, height=None):
    """
    Structural Similarity Index Measure (SSIM)

    Calculates the SSIM between 2 images given the window size. A 
    uniform kernel is used for simplicity. 

    Note: This calculation can be a bottleneck for training time if used 
    in the training loop. 

    """
    window = make_uniform3D_window(length=length, width=width, height=height)
    mu_orig = F.conv3d(Xorig, window)
    mu_recon = F.conv3d(Xrecon, window)

    mu_orig_sq = mu_orig.pow(2)
    mu_recon_sq = mu_recon.pow(2)
    mu_orig_mu_recon = mu_orig * mu_recon

    var_orig = F.conv3d(Xorig.pow(2), window) - mu_orig_sq
    var_recon = F.conv3d(Xrecon.pow(2), window) - mu_recon_sq

    cov_origrecon = F.conv3d(Xorig * Xrecon, window) - mu_orig_mu_recon

    Imax = torch.amax(Xorig, dim=(1, 2, 3, 4))
    Imin = torch.amin(Xorig, dim=(1, 2, 3, 4))

    L = (Imax - Imin).reshape(mu_orig_sq.shape[0], 1, 1, 1, 1)

    c1 = (0.01 * L).pow(2)
    c2 = (0.03 * L).pow(2)

    numerator = (2 * mu_orig_mu_recon + c1) * (2 * cov_origrecon + c2)
    denominator = (mu_orig_sq + mu_recon_sq + c1) * (var_orig + var_recon + c2)

    ssim = (numerator / denominator).mean(dim=(1, 2, 3, 4))
    return (ssim)
Code Example #24
File: local_ensemble.py Project: StanfordASL/SCOD
 def forward_extrap(self, mu, n_eigs):
     N = mu.shape[0]
     unc = torch.zeros(N, self.n_y_samp)
     for j in range(N):
         if self.n_y_samp > 1:
             y_samp = self.dist_fam.sample_y(mu,self.n_y_samp).to(mu.device) # d
             mu = mu[j:j+1,:].expand([self.n_y_samp]+[mu.shape[-1]]) # d x d
             loss = self.dist_fam.loss(mu, y_samp) # d x N
         else:
             loss = mu[j:j+1,0:1]
             
         for k in range(self.n_y_samp):
             zero_grads(self.model)
             loss[k].backward(retain_graph=True)
             
             with torch.no_grad():
                 g = self._get_grad_vec()
                 proj_g = g @ self.top_eigs[:, :n_eigs]
                 proj_g = proj_g @ self.top_eigs[:,:n_eigs].t()
                 proj_g = g - proj_g
                 unc[j,k] = torch.norm(proj_g)
     
     unc = torch.amin(unc, 1)
     return unc
Code Example #25
File: metrics.py Project: Akhilez/vision_lab
def _min(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    """Simply finds the max off the two tensors.
    Shapes of the two tensors has to be same.
    """
    return torch.amin(torch.stack([x, y]), dim=0)
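For two same-shaped tensors this matches the element-wise torch.minimum; the stack-and-amin form generalizes to reducing over more than two tensors. A quick check, not from the original file:

import torch

x, y = torch.randn(4, 4), torch.randn(4, 4)
assert torch.allclose(torch.amin(torch.stack([x, y]), dim=0), torch.minimum(x, y))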
Code Example #26
def stroke_renderer(curve_points: T.Tensor, locations: T.Tensor, colors: T.Tensor, widths: T.Tensor,
                    H: int, W: int, K: int, canvas_color: float):
    """
    Renders the given brushstroke parameters onto a canvas.
    See Alg. 1 in https://arxiv.org/pdf/2103.17185.pdf.

    Args:
        curve_points (tensor): Points specifying the curves that will be rendered on the canvas, shape [N, S, 2].
        locations (tensor): Location of each curve, shape [N, 2].
        colors (tensor): Color of each curve, shape [N, 3].
        widths (tensor): Width of each curve, shape [N, 1].
        H (int): Height of the canvas.
        W (int): Width of the canvas.
        K (int): Number of brushstrokes to consider for each pixel, see Sec. C.2 of the paper (Arxiv version).
        canvas_color (str): Background color of the canvas. Options: 'gray', 'white', 'black', 'noise'.
    Returns:
        (tensor): The rendered canvas, shape [H, W, 3].
    """
    colors = T.clamp(colors, 0., 1.)
    coord_x, coord_y = T.split(locations, [1, 1], dim=-1)
    coord_x = T.clamp(coord_x, 0, W)
    coord_y = T.clamp(coord_y, 0, H)
    locations = T.cat((coord_x, coord_y), dim=1)
    widths = T.exp(widths)

    device = curve_points.device
    N, S, _ = curve_points.shape

    # define coarse grid cell
    t_H = T.linspace(0., float(H), int(H // 5)).to(device)
    t_W = T.linspace(0., float(W), int(W // 5)).to(device)
    P_y, P_x = T.meshgrid(t_H, t_W)
    P = T.stack([P_x, P_y], dim=-1)  # [32, 32, 2]

    # Find nearest brushstrokes' indices for every coarse grid cell
    indices = knn(locations, P.view(-1, 2), k=K)[1]

    # Resize the KNN index tensor to full resolution
    indices = indices.view(len(t_H), len(t_W), -1)
    indices = indices.permute(2, 0, 1)
    indices = TF.resize(indices, size=(H, W), interpolation=TF.InterpolationMode.NEAREST)
    indices = indices.permute(1, 2, 0)

    # locations of points sampled from curves
    canvas_with_nearest_Bs = curve_points[indices.flatten()].view(H, W, K, S, 2)

    # colors of curves
    canvas_with_nearest_Bs_colors = colors[indices.flatten()].view(H, W, K, 3)

    # brush size
    canvas_with_nearest_Bs_bs = widths[indices.flatten()].view(H, W, K, 1)

    # Now create full-size canvas
    t_H = T.linspace(0., float(H), H).to(device)
    t_W = T.linspace(0., float(W), W).to(device)
    P_y, P_x = T.meshgrid(t_H, t_W)
    P_full = T.stack([P_x, P_y], dim=-1)  # [H, W, 2]

    # Compute distance from every pixel on canvas to each (among nearest ones) line segment between points from curves
    indices_a = T.tensor([i for i in range(S - 1)], dtype=T.long).to(device)
    canvas_with_nearest_Bs_a = canvas_with_nearest_Bs[:, :, :, indices_a, :]  # start points of each line segment
    indices_b = T.tensor([i for i in range(1, S)], dtype=T.long).to(device)
    canvas_with_nearest_Bs_b = canvas_with_nearest_Bs[:, :, :, indices_b, :]  # end points of each line segments
    canvas_with_nearest_Bs_b_a = canvas_with_nearest_Bs_b - canvas_with_nearest_Bs_a  # [H, W, N, S - 1, 2]
    P_full_canvas_with_nearest_Bs_a = P_full[:, :, None, None, :] - canvas_with_nearest_Bs_a  # [H, W, K, S - 1, 2]

    # find the projection of grid points on curves
    # first find the projections of a grid point on each line segment of a curve
    # numerator is the dot product between two vectors
    # the first vector is the line segments. the second vector is the sample points -> grid
    t = T.sum(canvas_with_nearest_Bs_b_a * P_full_canvas_with_nearest_Bs_a, dim=-1) / (
            T.sum(canvas_with_nearest_Bs_b_a ** 2, dim=-1) + 1e-8)

    # if t value is outside [0, 1], then the nearest point on the line does not lie on the segment, so clip values of t
    t = T.clamp(t, 0., 1.)

    # compute closest points on each line segment, which are the projections on each segment - [H, W, K, S - 1, 2]
    closest_points_on_each_line_segment = canvas_with_nearest_Bs_a + t[..., None] * canvas_with_nearest_Bs_b_a

    # compute the distance from every pixel to the closest point on each line segment - [H, W, K, S - 1]
    dist_to_closest_point_on_line_segment = T.sum(
        (P_full[..., None, None, :] - closest_points_on_each_line_segment) ** 2, dim=-1)

    # and distance to the nearest bezier curve.
    D_per_strokes = T.amin(dist_to_closest_point_on_line_segment, dim=-1)  # [H, W, K]
    D = T.amin(D_per_strokes, dim=-1)  # [H, W]

    # Finally render curves on a canvas to obtain image.
    I_NNs_B_ranking = F.softmax(100000. * (1.0 / (1e-8 + D_per_strokes)), dim=-1)  # [H, W, N]
    I_colors = T.einsum('hwnf,hwn->hwf', canvas_with_nearest_Bs_colors, I_NNs_B_ranking)  # [H, W, 3]
    bs = T.einsum('hwnf,hwn->hwf', canvas_with_nearest_Bs_bs, I_NNs_B_ranking)  # [H, W, 1]
    bs_mask = T.sigmoid(bs - D[..., None])  # AOE of each brush stroke
    canvas = T.ones_like(I_colors) * canvas_color
    I = I_colors * bs_mask + (1 - bs_mask) * canvas
    return I  # HxWx3
Code Example #27
File: ensemble.py Project: iksmada/PC-DARTS
def min_max_scaler(input):
    min = torch.amin(input, dim=2, keepdim=True, out=None)  # min-max scaler: per-slice minimum
    input = torch.add(input, torch.negative(min), out=None)  # subtract the min
    max = torch.amax(input, dim=2, keepdim=True, out=None)  # max of the shifted input
    return torch.div(input, max, out=None)  # divide by max
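A short usage sketch for min_max_scaler() above: each slice along dim=2 is rescaled to [0, 1] (constant slices would divide by zero, which the original does not guard against):

import torch

x = torch.randn(2, 3, 10)
scaled = min_max_scaler(x)
print(scaled.amin(dim=2))   # ~0 per slice
print(scaled.amax(dim=2))   # ~1 per slice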
Code Example #28
def val_fct(val_set, batch_size, input_width, snip_num=6, overlap=1):
    '''
    val_set: dataset
    batch_size: validation batch size
                (train_batch_size//number of spectrogram snippets)
    input_width: resolution of input width (columns dimension)
    snip_num: Number of snippets that spectrogram inputs are cut in
    overlap: int indicating overlap of spectrogram snippets
             1 = no overlap, 2 ~ 50% overlap
    '''
    model.eval()
    data_loader = DataLoader(val_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=num_cpus)

    time_cutoff = input_width*snip_num
    # stride used to cut spectrograms into chunks for validation/prediction
    # e.g. (2400-300)/(8*2-1) = 140
    validation_stride = (time_cutoff-input_width)//(snip_num*overlap-1)

    val_loss = 0
    val_predictions = []
    val_labels = []
    for inputs, labels in data_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        # adjust for last (potentially shorter) batch
        batch_size = inputs.shape[0]

        # go over spectrogram to cut out parts,
        # possibly overlapping with stride < kernel_size
        inputs_unfold = F.unfold(inputs[:, :, :, :time_cutoff],
                                 kernel_size=input_width,
                                 stride=validation_stride)
        # assuring correct order within batch
        inputs_transposed = inputs_unfold.transpose(1, 2)
        # reshape from (val_batch_size, overlap*snip_num, -1) to
        # (train_batch_size, filter channels, input_dim[0], input_dim[1])
        inputs_final = inputs_transposed.reshape(batch_size
                                                 * snip_num
                                                 * overlap,
                                                 3, input_width, input_width)
        # do same for labels for dimensions to match
        labels_duplicated = torch.cat([labels]
                                      * snip_num
                                      * overlap, dim=1).view(snip_num
                                                             * overlap
                                                             * batch_size,
                                                             labels.shape[1])
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=use_amp):
                output = model(inputs_final)
                loss = loss_fct(output, labels_duplicated)
            mean_loss_over_classes = torch.mean(loss, dim=1)
            loss_predicted = torch.amin(
                                torch.stack(
                                    torch.chunk(mean_loss_over_classes,
                                                chunks=batch_size)),
                                dim=1)
            loss = torch.mean(loss_predicted)
            val_loss += loss.item()

            pred_per_chunk = output.cpu().detach()

            # snip_num chunks x 8 batch components => 8 predictions
            # get highest probability per class over all snip_num
            # spectrogram parts for each batch component
            batch_pred = torch.amax(torch.stack(torch.chunk(pred_per_chunk,
                                                            chunks=batch_size,
                                                            dim=0)),
                                    dim=1)
            val_predictions.append(batch_pred)
            val_labels.append(labels.cpu())

    val_predictions = torch.cat(val_predictions, dim=0)
    val_labels = torch.cat(val_labels)
    if make_songtype_extra:
        val_predictions[:, 17] = torch.amax(val_predictions[:, [17, 24]],
                                            dim=1)
        val_predictions[:, 23] = torch.amax(val_predictions[:, [23, 25]],
                                            dim=1)
        val_predictions = val_predictions[:, :24]

        val_labels[:, 17][val_labels[:, 24] == 1] = 1
        val_labels[:, 23][val_labels[:, 25] == 1] = 1
        val_labels = val_labels[:, :24]

    lrap = lrap_score(val_labels.numpy(), val_predictions.numpy())
    avg_val_loss = val_loss / len(data_loader)
    return avg_val_loss, lrap
Code Example #29
        return compute_sum(add_in_place, sum_block, multiply_in_place)

    return torch_semiring_einsum.semiring_einsum_forward(
        equation, args, block_size, func)


equation = 'bij,bik->bjk'
equation = torch_semiring_einsum.compile_equation(equation)
mats = numpy.random.uniform(size=(256, 16, 256))
mats_torch = torch.from_numpy(mats).cuda()
t0 = time.time()
output = dominate_semiring(equation, mats_torch, mats_torch, block_size=10)
x1 = output.cpu().numpy()
print(time.time() - t0)

# method 3: broadcast
import numpy
import time
import torch

mats = numpy.random.uniform(size=(256, 16, 512))
mats_torch = torch.from_numpy(mats).cuda()
t0 = time.time()
left_broad = torch.transpose(mats_torch, 1, 2).unsqueeze(-1)
right_broad = mats_torch.unsqueeze(-3)
stack = torch.stack(torch.broadcast_tensors(left_broad, right_broad), 0)
conj = stack[0, :, :] >= stack[1, :, :]
output = torch.amin(conj, -2)
x2 = output.cpu().numpy()
print(time.time() - t0)
Code Example #30
File: arithmetic.py Project: jk983294/morph
 print(torch.renorm(mat1, 1, 0, 5))
 input_ = torch.tensor([10000., 1e-07])
 other_ = torch.tensor([10000.1, 1e-08])
 print(torch.floor_divide(input_, other_))  # trunc(input_ / other_)
 print(torch.allclose(input_, other_))  # ∣input−other∣≤atol+rtol×∣other∣
 print(torch.isclose(input_, other_))  # ∣input−other∣≤atol+rtol×∣other∣
 print(mat1)
 print(torch.where(mat1 > 0, mat1, -mat1))
 print(torch.amax(mat1, 0))  # column-wise
 print(torch.amax(mat1, 1))  # row-wise
 print(torch.max(mat1, 0))  # column-wise
 print(torch.max(mat1, 1))  # row-wise
 print(torch.argmax(mat1))  # over all elements
 print(torch.argmax(mat1, 0))  # column-wise
 print(torch.argmax(mat1, 1))  # row-wise
 print(torch.amin(mat1, 0))  # column-wise
 print(torch.amin(mat1, 1))  # row-wise
 print(torch.argmin(mat1))  # over all elements
 print(torch.argmin(mat1, 0))  # column-wise
 print(torch.argmin(mat1, 1))  # row-wise
 print(torch.argsort(mat1, 0))  # column-wise, returns the indices
 print(torch.argsort(mat1, 1))  # row-wise
 print(torch.topk(mat1, 2))
 # print(torch.msort(mat1))  # row-wise
 print(torch.kthvalue(mat1, 1, 0))
 print(torch.kthvalue(mat1, 1, 1))
 print(torch.logsumexp(mat1, 1))  # row-wise
 """cum"""
 print("cum function:")
 print(torch.logcumsumexp(x, dim=0))  # log (sigma(exp(xi)))
 print(torch.cummax(x, dim=0))