Example #1
def _single_tensor_adamax(params: List[Tensor], grads: List[Tensor],
                          exp_avgs: List[Tensor], exp_infs: List[Tensor],
                          state_steps: List[Tensor], *, eps: float,
                          beta1: float, beta2: float, lr: float,
                          weight_decay: float):

    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]
        # update step
        step_t += 1
        step = step_t.item()

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # Update biased first moment estimate.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        # Update the exponentially weighted infinity norm.
        norm_buf = torch.cat([
            exp_inf.mul_(beta2).unsqueeze(0),
            grad.abs().add_(eps).unsqueeze_(0)
        ], 0)
        torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)

        bias_correction = 1 - beta1**step
        clr = lr / bias_correction

        param.addcdiv_(exp_avg, exp_inf, value=-clr)
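A minimal driving sketch for the helper above (shapes and hyperparameters are illustrative; the step counter is a zero-dim tensor, as the in-place step_t += 1 requires):

import torch

param = torch.randn(3)
grad = torch.randn(3)
exp_avg = torch.zeros(3)
exp_inf = torch.zeros(3)
step_t = torch.zeros(())  # zero-dim step counter, incremented in place

_single_tensor_adamax([param], [grad], [exp_avg], [exp_inf], [step_t],
                      eps=1e-8, beta1=0.9, beta2=0.999, lr=1e-3,
                      weight_decay=0.0)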
Example #2
def get_asymmetric_3d_iou(RT_1, RT_2, scales_1, scales_2):

    noc_cube_1 = get_3d_bbox(scales_1, 0)
    bbox_3d_1 = transform_3d_camera_coords_to_3d_world_coords(noc_cube_1, RT_1)

    noc_cube_2 = get_3d_bbox(scales_2, 0)
    bbox_3d_2 = transform_3d_camera_coords_to_3d_world_coords(noc_cube_2, RT_2)

    bbox_1_max = torch.amax(bbox_3d_1, dim=0)
    bbox_1_min = torch.amin(bbox_3d_1, dim=0)
    bbox_2_max = torch.amax(bbox_3d_2, dim=0)
    bbox_2_min = torch.amin(bbox_3d_2, dim=0)

    overlap_min = torch.maximum(bbox_1_min, bbox_2_min)
    overlap_max = torch.minimum(bbox_1_max, bbox_2_max)

    # intersections and union
    if torch.amin(overlap_max - overlap_min) < 0:
        intersections = 0
    else:
        intersections = torch.prod(overlap_max - overlap_min)

    union = torch.prod(bbox_1_max -
                       bbox_1_min) + torch.prod(bbox_2_max -
                                                bbox_2_min) - intersections
    iou_3d = intersections / union

    return iou_3d
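The overlap test above generalizes to any pair of axis-aligned boxes; a condensed, self-contained sketch of the same amin/amax intersection logic (corner coordinates chosen so the expected intersection volume is 1):

import torch

box1 = torch.tensor([[0., 0., 0.], [2., 2., 2.]])  # one corner per row
box2 = torch.tensor([[1., 1., 1.], [3., 3., 3.]])
overlap_min = torch.maximum(torch.amin(box1, dim=0), torch.amin(box2, dim=0))
overlap_max = torch.minimum(torch.amax(box1, dim=0), torch.amax(box2, dim=0))
intersection = torch.prod(torch.clamp(overlap_max - overlap_min, min=0))
print(intersection)  # tensor(1.)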
Example #3
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device=None):
    b, h, *_ = data.shape

    data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.

    ratio = (projection_matrix.shape[0] ** -0.5)

    projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
    projection = projection.type_as(data)

    data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)

    diag_data = data ** 2
    diag_data = torch.sum(diag_data, dim=-1)
    diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
    diag_data = diag_data.unsqueeze(dim=-1)

    if is_query:
        data_dash = ratio * (
            torch.exp(data_dash - diag_data -
                    torch.amax(data_dash, dim=-1, keepdim=True).detach()) + eps)
    else:
        data_dash = ratio * (
            torch.exp(data_dash - diag_data - torch.amax(data_dash, dim=(-1, -2), keepdim=True).detach()) + eps)

    return data_dash.type_as(data)
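A shape-only usage sketch (einops must be installed, since the function depends on its repeat; the random projection matrix here is purely illustrative):

import torch
from einops import repeat  # softmax_kernel depends on einops' repeat

q = torch.randn(2, 4, 16, 64)  # (batch, heads, seq_len, head_dim)
proj = torch.randn(128, 64)    # (num_random_features, head_dim)
q_prime = softmax_kernel(q, projection_matrix=proj, is_query=True)
print(q_prime.shape)           # torch.Size([2, 4, 16, 128])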
Example #4
def adamax(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
           exp_infs: List[Tensor], state_steps: List[int], *, eps: float,
           beta1: float, beta2: float, lr: float, weight_decay: float):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """

    for i, param in enumerate(params):
        grad = grads[i]
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step = state_steps[i]

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        # Update biased first moment estimate.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        # Update the exponentially weighted infinity norm.
        norm_buf = torch.cat([
            exp_inf.mul_(beta2).unsqueeze(0),
            grad.abs().add_(eps).unsqueeze_(0)
        ], 0)
        torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)

        bias_correction = 1 - beta1**step
        clr = lr / bias_correction

        param.addcdiv_(exp_avg, exp_inf, value=-clr)
Example #5
def quad_propagate(target, source, i, dim=0):
    _i = i << 1
    if dim == 0:
        xx = torch.amax(source[_i:_i + 2], dim=dim)
    else:
        xx = torch.amax(source[:, _i:_i + 2], dim=dim)
    if dim == 0:
        target[i] = torch.amax(xx.view(-1, 2), dim=1)
    else:
        target[:, i] = torch.amax(xx.view(-1, 2), dim=1)
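A small demonstration, assuming target is half the size of source along both axes; with dim=0, call i fills row i of target with the max over each 2x2 block of the corresponding pair of source rows:

import torch

source = torch.arange(16.).view(4, 4)
target = torch.empty(2, 2)
for i in range(2):
    quad_propagate(target, source, i, dim=0)
print(target)  # tensor([[ 5.,  7.], [13., 15.]]), the max of each 2x2 block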
Example #6
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError(
                        'Adamax does not support sparse gradients')
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)
                    state['exp_inf'] = torch.zeros_like(
                        p, memory_format=torch.preserve_format)

                exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
                beta1, beta2 = group['betas']
                eps = group['eps']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    grad = grad.add(p, alpha=group['weight_decay'])

                # Update biased first moment estimate.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # Update the exponentially weighted infinity norm.
                norm_buf = torch.cat([
                    exp_inf.mul_(beta2).unsqueeze(0),
                    grad.abs().add_(eps).unsqueeze_(0)
                ], 0)
                torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)

                bias_correction = 1 - beta1**state['step']
                clr = group['lr'] / bias_correction

                p.addcdiv_(exp_avg, exp_inf, value=-clr)

        return loss
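Wrapped in an optimizer class (called Adamax here by assumption, with the usual torch.optim constructor), step() is driven like any other optimizer:

import torch

model = torch.nn.Linear(4, 1)
opt = Adamax(model.parameters(), lr=2e-3)  # hypothetical constructor signature
loss = model(torch.randn(8, 4)).pow(2).mean()
opt.zero_grad()
loss.backward()
opt.step()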
Example #7
    def optimize_model(self):
        """
        Optimize the network via a training function. Will return immediately
        without training if there is not enough memory in the experience replay.
        """
        # If the NStepModule's experience replay isn't large enough, we should bail out.
        # Otherwise, we can grab sample data from the replay memory.
        if not self.NStepModule.isMemoryLargeEnoughToTrain(BATCH_SIZE):
            return
        transitions = self.NStepModule.sampleReplayMemory(BATCH_SIZE)

        # Create the batch of data to use
        batch = Transition(*zip(*transitions))
        nth_next_state_swarms_batch = torch.from_numpy(
            np.asarray(batch.next_state_swarms))
        swarm_state_batch = torch.from_numpy(np.asarray(batch.swarm_obs))
        swarm_action_batch = torch.from_numpy(np.asarray(
            batch.swarm_action)).unsqueeze(1)
        reward_batch = torch.from_numpy(np.asarray(batch.reward))
        # Compute a mask of non-final states and concatenate the batch elements
        non_final_mask = torch.from_numpy(np.asarray(batch.doesNotHitDone))
        non_final_next_state_swarms_batch = nth_next_state_swarms_batch[
            non_final_mask, :, :]

        # Compute the swarm's predicted qs for the current state
        state_swarms_predicted_q_batch = self.policy_net(
            swarm_state_batch).gather(1, swarm_action_batch)

        # Compute the swarm's future value for next states
        next_state_swarms_predicted_qs_batch = torch.zeros(
            (BATCH_SIZE, 12, self.num_nodes), device=device)
        for swarm_num in range(NUM_GROUPS):
            next_state_swarms_predicted_qs_batch[
                non_final_mask, swarm_num, :] = self.target_net(
                    non_final_next_state_swarms_batch[:,
                                                      swarm_num, :]).detach()
        # Limit future value to the best q value for each swarm
        max_next_state_swarms_predicted_qs_batch = torch.amax(
            next_state_swarms_predicted_qs_batch, axis=2)
        max_next_state_predicted_q_batch = torch.amax(
            max_next_state_swarms_predicted_qs_batch, axis=1)
        # Compute the estimated future reward
        estimated_future_reward = max_next_state_predicted_q_batch * GAMMA**N_STEP + reward_batch

        # Compute the loss
        loss = F.smooth_l1_loss(
            state_swarms_predicted_q_batch,
            estimated_future_reward.type(torch.FloatTensor).unsqueeze(1))
        self.optimizer.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optimizer.step()
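The two chained torch.amax reductions above collapse per-node q-values to one best value per batch element; in isolation the pattern is (shapes illustrative):

import torch

qs = torch.randn(5, 12, 7)            # (batch, swarms, nodes)
per_swarm = torch.amax(qs, axis=2)    # best node per swarm -> (5, 12)
best = torch.amax(per_swarm, axis=1)  # best swarm per element -> (5,)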
Example #8
def calc_top_k_dist(k=10):
    # define constants
    batch_size = 5
    loss_func = nn.MSELoss()
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    # construct datasets
    _, _, test_non_novel = torch_mnist(batch_size, [0, 1, 2, 3, 4],
                                       shuffle=False)
    _, _, test_novel = torch_mnist(batch_size, [5, 6, 7, 8, 9], shuffle=False)
    # construct model
    model_path = "models\\LSA_mnist_no_est_class_0_1_2_3_4\\500_lr_1e-2\\LSA_mnist_no_est_500.pt"
    model = load_model(model_path)
    model.to(device)
    model.eval()
    # eval model
    for is_novel in [False, True]:
        top_k_dist = np.asarray([])
        # choose data loader
        if not is_novel:
            data_loader = test_non_novel
        else:
            data_loader = test_novel
        # generate data
        for data, target in data_loader:
            data = data.to(device)
            data.requires_grad_()
            r_data, embedding = model(data)
            batch_loss = loss_func(data, r_data)
            batch_loss.backward()
            saliency = data.grad.data
            # remove grad from tensors for numpy conversion
            data = data.detach().cpu()
            r_data = r_data.detach().cpu()
            saliency = saliency.detach().cpu()
            # regularize the saliency map
            saliency = torch.abs(saliency)
            saliency = saliency / torch.amax(saliency, (2, 3), keepdim=True)
            # compute regularized squared error
            r_error = torch.square(r_data - data)
            r_error = r_error / torch.amax(r_error, (2, 3), keepdim=True)
            # calc agreement between top k
            top_s = top_k(saliency, k)
            top_r = top_k(r_error, k)
            for i in range(top_s.shape[0]):
                top_s_pos = np.argwhere(top_s[i, 0] > 0)
                top_r_pos = np.argwhere(top_r[i, 0] > 0)
                min_r_to_s = np.min(cdist(top_s_pos, top_r_pos), axis=1)
                top_k_dist = np.hstack((top_k_dist, np.max(min_r_to_s)))
        # save top k results
        fname = "novel_top_%i_dist.npy" % (
            k, ) if is_novel else "non_novel_top_%i_dist.npy" % (k, )
        np.save(fname, top_k_dist)
Example #9
def test_fct(test_set, batch_size, input_width, snip_num=8, overlap=1):
    model.eval()
    data_loader = DataLoader(test_set,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=num_cpus)

    time_cutoff = input_width * snip_num
    # stride used to cut spectrograms into chunks for prediction
    # e.g. (2400-300)/(8*2-1) = 140
    test_stride = (time_cutoff - input_width) // (snip_num * overlap - 1)

    test_predictions = []
    for inputs in data_loader:
        inputs = inputs.to(device)
        # adjust for last (potentially shorter) batch
        batch_size = inputs.shape[0]

        # go over spectrogram to cut out parts,
        # possibly overlapping with stride < kernel_size
        inputs_unfold = F.unfold(inputs[:, :, :, :time_cutoff],
                                 kernel_size=input_width,
                                 stride=test_stride)
        # assuring correct order within batch
        inputs_transposed = inputs_unfold.transpose(1, 2)
        # reshape from (val_batch_size, overlap*snip_num, -1) to
        # (train_batch_size, filter channels, input_dim[0], input_dim[1])
        inputs_final = inputs_transposed.reshape(
            batch_size * snip_num * overlap, 3, input_width, input_width)
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=use_amp):
                output = torch.sigmoid(model(inputs_final))
            pred_per_chunk = output.cpu().detach()

            # get highest probability per class over all spectrogram parts
            # for each batch component
            # e.g. 8 chunks x 4 batch components => 4 predictions
            batch_pred = torch.amax(torch.stack(
                torch.chunk(pred_per_chunk, chunks=batch_size, dim=0)),
                                    dim=1)
            test_predictions.append(batch_pred)

    test_predictions = torch.cat(test_predictions, dim=0)
    if make_songtype_extra:
        test_predictions[:, 17] = torch.amax(test_predictions[:, [17, 24]],
                                             dim=1)
        test_predictions[:, 23] = torch.amax(test_predictions[:, [23, 25]],
                                             dim=1)
        test_predictions = test_predictions[:, :24]

    return test_predictions
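The F.unfold call is the core of the chunking; isolated, with the shapes from the comment above (input_width=300, snip_num=8, overlap=2, hence stride 140 and 16 snippets per spectrogram):

import torch
import torch.nn.functional as F

spec = torch.randn(4, 3, 300, 2400)                       # (batch, ch, mels, time)
chunks = F.unfold(spec[:, :, :, :2400], kernel_size=300, stride=140)
chunks = chunks.transpose(1, 2).reshape(-1, 3, 300, 300)  # (4 * 16, 3, 300, 300)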
Example #10
def calc_mse(square_saliency=False):
    # define constants
    batch_size = 5
    loss_func = nn.MSELoss()
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
    # construct datasets
    _, _, test_non_novel = torch_mnist(batch_size, [0, 1, 2, 3, 4],
                                       shuffle=False)
    _, _, test_novel = torch_mnist(batch_size, [5, 6, 7, 8, 9], shuffle=False)
    # construct model
    model_path = "models\\LSA_mnist_no_est_class_0_1_2_3_4\\500_lr_1e-2\\LSA_mnist_no_est_500.pt"
    model = load_model(model_path)
    model.to(device)
    model.eval()
    # eval model
    for is_novel in [False, True]:
        mse = np.asarray([])
        # choose data loader
        if not is_novel:
            data_loader = test_non_novel
        else:
            data_loader = test_novel
        # generate data
        for data, target in data_loader:
            data = data.to(device)
            data.requires_grad_()
            r_data, embedding = model(data)
            batch_loss = loss_func(data, r_data)
            batch_loss.backward()
            saliency = data.grad.data
            # remove grad from tensors for numpy conversion
            data = data.detach().cpu()
            r_data = r_data.detach().cpu()
            saliency = saliency.detach().cpu()
            # regularize the saliency map
            saliency = torch.abs(saliency)
            saliency = saliency / torch.amax(saliency, (2, 3), keepdim=True)
            if square_saliency:
                saliency = torch.square(saliency)
            # compute regularized squared error
            r_error = torch.square(r_data - data)
            r_error = r_error / torch.amax(r_error, (2, 3), keepdim=True)
            # compute MSE between the saliency map and the reconstruction error
            mse_batch = np.square(saliency - r_error).mean(axis=(1, 2, 3))
            mse = np.hstack((mse, mse_batch))
        # save mse results
        fname = "novel_mse.npy" if is_novel else "non_novel_mse.npy"
        if square_saliency:
            fname = "sqr_" + fname
        np.save(fname, mse)
Example #11
def amax(input: Tensor,
         dim: DimOrDims = None,
         *,
         keepdim: Optional[bool] = False,
         dtype: Optional[DType] = None,
         mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    mask_input = _combine_input_and_mask(amax, input, mask)
    if input.layout == torch.strided:
        dim_ = _canonical_dim(dim, mask_input.ndim)
        return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    else:
        raise ValueError(
            f'masked amax expects strided tensor (got {input.layout} tensor)')
Example #12
def to_categorical(X, n_col=None):
    if not n_col:
        n_col = torch.amax(X) + 1

    one_hot = torch.zeros((X.shape[0], n_col))
    one_hot[torch.arange(X.shape[0]), X] = 1
    return one_hot
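Usage sketch: integer labels 0..2 become rows of a 3-column one-hot matrix (n_col is inferred via torch.amax when omitted):

import torch

X = torch.tensor([0, 2, 1])
print(to_categorical(X))
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])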
Example #13
    def test_schema_check_mode_functionality_with_multiple_outputs_aliasing(
            self):
        x = torch.rand((3, 3))
        actual = torch.zeros(3)
        with enable_torch_dispatch_mode(SchemaCheckMode()):
            torch.aminmax(x, dim=0, out=[actual, actual])
        self.assertEqual(torch.amax(x, dim=0), actual)
Example #14
def get_trigger_probs(batch, all_logits, loss_type='clean', ix=None):
    ix_plus_one = None
    if ix is not None:
        ix_plus_one = ix + 1
    input_length = batch['input_ids'].shape[-1]
    # TODO: Remove hardcoding here and instead stack the logits
    logit_matrix = torch.stack(all_logits[f'{loss_type}_start'][ix:ix_plus_one]).mean(0).unsqueeze(1).expand(-1, input_length, -1) + \
                   torch.stack(all_logits[f'{loss_type}_end'][ix:ix_plus_one]).mean(0).unsqueeze(-1).expand(-1, -1, input_length)
    logit_matrix += (~batch['valid_mask']) * (-1e10)
    temperature = args.temperature
    if train_or_test == 'test':
        temperature = 1
    scores = torch.exp(logit_matrix / temperature)
    probs = scores / torch.sum(scores, dim=[1, 2]).view(-1, 1, 1).expand(-1, input_length, input_length)

    num_triggered = torch.zeros(1, device=DEVICE)
    if train_or_test == 'test' and not populate_baselines:
        best_ans_ixs = torch.arange(len(probs)), probs.view(len(probs), -1).argmax(dim=-1)
        num_triggered = batch['trigger_matrix_mask'].bool().view(len(probs), -1)[best_ans_ixs].sum()

    answer_prob = torch.zeros(1, device=DEVICE)
    if populate_baselines:
        answer_prob = torch.sum(probs * batch['answer_mask'].expand(probs.shape), dim=[-1, -2])

    if args.likelihood_agg == 'sum':
        input_trigger_probs = torch.sum(probs * batch['trigger_matrix_mask'].expand(probs.shape), dim=[-1, -2])
    elif args.likelihood_agg == 'max':
        input_trigger_probs = torch.amax(probs * batch['trigger_matrix_mask'].expand(probs.shape), dim=[-1, -2])
    else:
        raise NotImplementedError

    return input_trigger_probs, num_triggered, answer_prob
Example #15
    def forward(ctx, input, num_bits, min_value=None, max_value=None, num_groups=1):
        """
        Args:
            inputs (`torch.FloatTensor`)
                The input which needs to be quantized
            num_bits (int, >=4)
                Number of bits to use for quantization
            min_value/max_value (torch.FloatTensor)
                Used for static activation quantization
            num_groups (int)
                How many groups to partition the quantization into
        Returns:
            quantized_input (`torch.FloatTensor`)
                Quantized input
        """
        assert (min_value is None
                and max_value is None) or (min_value is not None
                                           and max_value is not None and num_groups == 1)
        q_range = 2**num_bits
        input_shape = input.shape
        if min_value is None:
            input = input.reshape(num_groups, -1)
            max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1)
        else:
            max_input = torch.max(min_value.abs(), max_value).view(-1)

        scale = 2 * max_input / q_range
        output = (input / scale).round().clamp(-q_range // 2, q_range // 2 - 1) * scale
        output = output.reshape(input_shape).contiguous()
        return output
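Since ctx is unused in the body, the forward can be exercised directly as a plain function for a quick sanity check (assuming module-level access; 8 bits, one group):

import torch

x = torch.randn(4, 16)
x_q = forward(None, x, num_bits=8)  # quantize-dequantize round trip
print((x - x_q).abs().max())        # error on the order of one quantization step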
Example #16
def move_sdims(stens, stable_dims):
    """Return copy of input STensor with new stable dims"""
    # Get the data dimensions associated with new stable dims
    assert all(0 <= i < stens.ndim for i in stable_dims)
    data_dims = tuple(i for i in range(stens.ndim) if i not in stable_dims)

    # Rescale data tensor relative to maximum of scale values, expanding
    # the slices of the former and getting a preliminary scale
    new_scale = torch.amax(stens.scale, dim=data_dims, keepdim=True)
    new_data = stens.data * 2**(stens.scale - new_scale)

    # Get the norm of all new slices as a correction to the above scale
    if data_dims == ():
        new_norms = new_data.abs()
    else:
        new_norms = torch.sum(new_data.abs(), dim=data_dims, keepdim=True)
    correction = torch.floor(
        TARGET_SCALE(new_data.shape, data_dims) - torch.log2(new_norms))

    # Filter out any spurious infinities from zero slices
    if torch.any(torch.isinf(correction)):
        correction = torch.where(torch.isfinite(correction), correction,
                                 torch.zeros_like(correction))

    # Apply correction to new scale and data tensors, return result
    new_data *= 2**correction
    new_scale = new_scale - correction
    new_stens = STensor(new_data, new_scale)
    assert new_stens.stable_dims == stable_dims
    new_stens.rescale_()
    return new_stens
Example #17
    def forward(self, distance_matrix, num_batch_classes):
        num_batch_images = distance_matrix.shape[0]
        num_images_per_class = num_batch_images // num_batch_classes
        # template marks same-class (positive) pairs, excluding the diagonal
        template = torch.zeros((num_batch_images, num_batch_images))
        for x in range(num_batch_images):
            class_start = x // num_images_per_class * num_images_per_class
            class_end = class_start + num_images_per_class
            for y in range(class_start, class_end):
                if x != y:
                    template[x, y] = 1
        positive_distance_matrix = distance_matrix.mul(template)
        negative_distance_matrix = distance_matrix - positive_distance_matrix
        # hardest positive per anchor
        positive_distance = torch.amax(positive_distance_matrix, dim=1)
        # closest negative per anchor, skipping the zeroed positive slots
        negative_distance, _ = torch.sort(negative_distance_matrix)
        negative_distance = negative_distance[:, num_images_per_class]
        one = -torch.ones(num_batch_images)
        if self.soft_margin:
            soft_margin_loss = nn.SoftMarginLoss()
            loss = soft_margin_loss(positive_distance - negative_distance, one)
        else:
            losses = positive_distance - negative_distance + self.margin
            losses = torch.clamp(losses, min=0)
            loss = torch.mean(losses)
        return loss
Example #18
    def forward(self, input):
        if self.statistic == "dataset":
            if self.normtype == "mean":
                return input - self.mean
            elif self.normtype == "standard":
                std = torch.sqrt(self.mean_squared - self.mean**2)
                return (input - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError

        elif self.statistic == "instance":
            if self.normtype == "mean":
                return input - torch.mean(input, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (input - torch.mean(input, self.dims, keepdim=True)) / (
                    torch.std(input, self.dims, keepdim=True) + self.eps)
            elif self.normtype == "minmax":
                return (input - torch.amin(input, dim=self.dims, keepdim=True)
                        ) / (torch.amax(input, dim=self.dims, keepdim=True) -
                             torch.amin(input, dim=self.dims, keepdim=True) +
                             self.eps)
            else:
                raise NotImplementedError

        else:
            raise NotImplementedError
Example #19
def minkowski_distance_p(x, y, p=2):
    """
    Compute the p-th power of the L**p distance between two arrays.
    For efficiency, this function computes the L**p distance but does
    not extract the pth root. If `p` is 1 or infinity, this is equal to
    the actual L**p distance.
    Parameters
    ----------
    x : (M, K) tensor
        Input array.
    y : (N, K) tensor
        Input array.
    p : float, 1 <= p <= infinity
        Which Minkowski p-norm to use.
    Examples
    --------
    >>> x=torch.tensor([[0,0],[0,0]], dtype=torch.double)
    >>> y=torch.tensor([[1,1],[0,1]], dtype=torch.double)
    >>> print(minkowski_distance_p(x, y))
    tensor([ 2.,  1.], dtype=torch.float64)
    """
    if p == float("Inf"):
        return torch.amax(torch.abs(y - x), dim=-1)
    elif p == 1:
        return torch.sum(torch.abs(y - x), dim=-1)
    else:
        return torch.sum(torch.abs(y - x)**p, dim=-1)
Example #20
def take_measurements(detected_sources, images, box_size) -> Table:
    # Take measurements
    measurements = []

    for i in range(len(detected_sources)):
        x = detected_sources["x_peak"][i]
        y = detected_sources["y_peak"][i]
        data = images[
            :,
            x - box_size // 2 : x + box_size // 2,
            y - box_size // 2 : y + box_size // 2,
        ]

        if isinstance(data, torch.Tensor):
            assert not torch.any(torch.isnan(data))
        else:
            assert not np.any(np.isnan(data))

        try:
            if isinstance(images, torch.Tensor):
                peak_flux = torch.amax(data, axis=(-1, -2))
                # integrated_flux = torch.sum(data, axis=(-1, -2))
                measurements.append(peak_flux.to("cpu").numpy())
            else:
                peak_flux = np.max(data, axis=(-1, -2))
                # integrated_flux = np.sum(data, axis=(-1, -2))
                measurements.append(peak_flux)
        except RuntimeError:
            raise RuntimeError(("Got exception while measuring peak flux."
                   " This is likely the result of a source being out of bounds."
                   " Consider lowering the detection radius."))

    detected_sources["peak_flux"] = measurements

    return detected_sources
Example #21
def amax(input: Tensor,
         dim: DimOrDims = None,
         *,
         keepdim: Optional[bool] = False,
         dtype: Optional[DType] = None,
         mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype

    mask_input = _combine_input_and_mask(amax, input, mask)
    dim_ = _canonical_dim(dim, mask_input.ndim)
    if input.layout == torch.strided:
        return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    elif input.layout == torch.sparse_coo:
        if mask is None:
            # See comment in the sparse_csr branch of prod, a similar issue arises here
            # where unspecified elements along a dimension may need to be reduced with the result
            raise ValueError('masked amax expects explicit mask for sparse_coo tensor input')
        return _sparse_coo_scatter_reduction_helper(torch.amax, mask_input, dim_, bool(keepdim), dtype)
    else:
        raise ValueError(f'masked amax expects strided or sparse_coo tensor (got {input.layout} tensor)')
Example #22
    def forward(self, x):
        # check dims
        if x.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(
                x.dim()))
        if self.training:
            # batch stats
            x_min = torch.amin(x, dim=(0, 1))
            x_max = torch.amax(x, dim=(0, 1))

            if self.first:
                self.max = x_max
                self.min = x_min
                self.first = False

            else:
                # update min and max, masking the correct entries
                max_mask = torch.greater(x_max, self.max)
                self.max = (max_mask * x_max) + \
                    (torch.logical_not(max_mask) * self.max)

                min_mask = torch.less(x_min, self.min)
                self.min = (min_mask * x_min) + \
                    (torch.logical_not(min_mask) * self.min)

            self.max_min = self.max - self.min + 1e-13

        # scale batch
        x = (x - self.min) / self.max_min

        return x
Example #23
    def reduce_heatmaps(
        heatmap: Tuple[Tensor, ...],
        reduction: Callable[[Tensor, Tensor], Tensor] = torch.max,
        mode: str = "nearest",
    ) -> Tensor:
        r"""Helper function that reduces FCOS FPN heatmaps into a single channel heatmap
        suitable for visualization.

        Args:
            heatmap (tuple of :class:`torch.Tensor`):
                FCOS FPN heatmaps to reduce

            reduction:
                Function that should accept two equally sized tensors and reduce them to a
                single output tensor. By default, heatmaps are reduced with :func:`torch.max`.
        """
        result = heatmap[0]
        C = result.shape[1]

        # reduce each FPN level
        for i in range(len(heatmap) - 1):
            current_level = F.interpolate(heatmap[i + 1],
                                          result.shape[-2:],
                                          mode=mode)
            result = reduction(current_level, result)

        # reduce across channels to a 1 channel heatmap
        if C != 1:
            result = torch.amax(result, dim=1, keepdim=True)

        return result
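A shape-only usage sketch with three dummy FPN levels of decreasing resolution (assumes the method is reachable as a plain function and that torch.nn.functional is imported as F, as the body requires):

import torch
import torch.nn.functional as F

levels = (torch.rand(1, 3, 32, 32), torch.rand(1, 3, 16, 16), torch.rand(1, 3, 8, 8))
out = reduce_heatmaps(levels)
print(out.shape)  # torch.Size([1, 1, 32, 32])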
Example #24
    def reduction_ops(self):
        a = torch.randn(4)
        b = torch.randn(4)
        return (
            torch.argmax(a),
            torch.argmin(a),
            torch.amax(a),
            torch.amin(a),
            torch.aminmax(a),
            torch.all(a),
            torch.any(a),
            torch.max(a),
            torch.min(a),
            torch.dist(a, b),
            torch.logsumexp(a, 0),
            torch.mean(a),
            torch.nanmean(a),
            torch.median(a),
            torch.nanmedian(a),
            torch.mode(a),
            torch.norm(a),
            torch.nansum(a),
            torch.prod(a),
            torch.quantile(a, torch.tensor([0.25, 0.5, 0.75])),
            torch.nanquantile(a, torch.tensor([0.25, 0.5, 0.75])),
            torch.std(a),
            torch.std_mean(a),
            torch.sum(a),
            torch.unique(a),
            torch.unique_consecutive(a),
            torch.var(a),
            torch.var_mean(a),
            torch.count_nonzero(a),
        )
Example #25
    def forward(self, tensor):
        if self.statistic == "dataset":
            assert hasattr(self, "mean") and hasattr(
                self, "mean_squared"
            ), "TorchScaler should be fit before used if statistics=dataset"
            assert tensor.ndim == self.mean.ndim, "Pre-computed statistics do not match the input's ndim"
            if self.normtype == "mean":
                return tensor - self.mean
            elif self.normtype == "standard":
                std = torch.sqrt(self.mean_squared - self.mean ** 2)
                return (tensor - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError

        else:
            if self.normtype == "mean":
                return tensor - torch.mean(tensor, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (tensor - torch.mean(tensor, self.dims, keepdim=True)) / (
                    torch.std(tensor, self.dims, keepdim=True) + self.eps
                )
            elif self.normtype == "minmax":
                return (tensor - torch.amin(tensor, dim=self.dims, keepdim=True)) / (
                    torch.amax(tensor, dim=self.dims, keepdim=True)
                    - torch.amin(tensor, dim=self.dims, keepdim=True)
                    + self.eps
                )
            else:
                raise NotImplementedError
Example #26
    def forward(self, x, h_axis=2, w_axis=3):
        """
        x: feature map BxCxHxW
        h_axis: the axis of Height
        w_axis: the axis of width
        """
        # self.in_h, self.in_w = x.shape[h_axis:]
        batch, channel = x.shape[:h_axis]

        # Calculate weighted position of joint(-1~1)
        #x_mean = get_gaussian_mean(x, 2, 3)
        #y_mean = get_gaussian_mean(x, 3, 2)
        x_mean, y_mean = xy_outputs(x, True)

        coord = torch.stack([y_mean, x_mean], dim=2)

        x_mean = x_mean.unsqueeze(-1).unsqueeze(-1).repeat(
            1, 1, self.out_h, self.out_w)
        y_mean = y_mean.unsqueeze(-1).unsqueeze(-1).repeat(
            1, 1, self.out_h, self.out_w)

        x_ind = torch.linspace(-1.0, 1.0, self.out_h).unsqueeze(-1).repeat(
            batch, channel, 1, self.out_w).to(x.device)
        y_ind = torch.linspace(-1.0, 1.0, self.out_w).unsqueeze(0).repeat(
            batch, channel, self.out_h, 1).to(x.device)

        dist = (x_ind - x_mean)**2 + (y_ind - y_mean)**2

        res = torch.exp(-(dist + 1e-6).sqrt_() / (2 * self.std**2))
        max_val = torch.amax(res[0], dim=(1, 2))
        return res, coord, max_val
Example #27
def amax(input: Tensor,
         dim: DimOrDims = None,
         *,
         keepdim: Optional[bool] = False,
         dtype: Optional[DType] = None,
         mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    if dtype is None:
        dtype = input.dtype
    if input.layout == torch.strided:
        if mask is None:
            mask_input = input
        else:
            identity = input.new_full([], _reduction_identity('amax', input))
            mask_input = torch.where(mask, input, identity)
        dim_ = _canonical_dim(dim, mask_input.ndim)
        return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype)
    else:
        raise ValueError(
            f'masked amax expects strided tensor (got {input.layout} tensor)')
Example #28
    def forward(self, x):
        # global max-pool: reduce each 5D feature map over its spatial dims
        x = [torch.amax(a, dim=(2, 3, 4)) for a in x]
        # combine the pooled features and classify
        x = torch.cat(x, dim=-1)
        x = self.linear(x)
        return x
Example #29
    def forward(self, x):
        red_mean = torch.mean(x, dim=(2, 3))
        red_max = torch.amax(x, dim=(2, 3))
        exc1 = self.channel_attention2(
            self.channel_relu(self.channel_attention1(red_mean)))
        exc2 = self.channel_attention2(
            self.channel_relu(self.channel_attention1(red_max)))
        exc = torch.sigmoid(exc1 + exc2)

        att1 = exc[:, :, None, None] * x

        feat1 = torch.mean(att1, dim=1, keepdim=True)
        feat2 = torch.amax(att1, dim=1, keepdim=True)
        feat = torch.cat((feat1, feat2), axis=1)

        att_chan = torch.sigmoid(self.channel_spatial(feat))
        return att1 * att_chan
Example #30
    def max_pool2d_layer_numpy(self, x):
        b, c, h, w = x.shape
        b_strided, c_strided, h_strided, w_strided = x.stride()
        # view x as (b, c, h//2, w//2, 2, 2): each trailing 2x2 block is one pooling window
        x_strided = as_strided(x, (b, c, h // 2, w // 2, 2, 2),
                               (b_strided, c_strided, h_strided * 2,
                                w_strided * 2, h_strided, w_strided))
        result = torch.amax(x_strided, dim=(-1, -2))
        return result
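Equivalence sketch: for even H and W the strided view reproduces built-in 2x2 max pooling exactly (self is unused, so None stands in when the method is called unbound; as_strided is assumed to be torch.as_strided):

import torch
import torch.nn.functional as F
from torch import as_strided

x = torch.randn(1, 3, 8, 8)
assert torch.equal(max_pool2d_layer_numpy(None, x), F.max_pool2d(x, 2))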