Example 1
    def forward(self, features, rois, ratio):
        """
        bilinear interpolation
        :param features: pytorch Variable (1, C, H, W)
        :param rois: pytorch tensor, (N, 4)
        :param ratio: ratio of feature size to image size
        :return: (N, C, H_out, W_out)
        """
        feature_size = list(features.size())
        rois_in_features = rois * ratio
        rois_in_features[:, 0].clamp_(0, feature_size[2] - 1)
        rois_in_features[:, 1].clamp_(0, feature_size[3] - 1)
        rois_in_features[:, 2].clamp_(0, feature_size[2] - 1)
        rois_in_features[:, 3].clamp_(0, feature_size[3] - 1)

        h_step = ((rois_in_features[:, 2] - rois_in_features[:, 0]) /
                  (self.pool_out_size[0] * self.sub_sample))[:, None]
        w_step = ((rois_in_features[:, 3] - rois_in_features[:, 1]) /
                  (self.pool_out_size[1] * self.sub_sample))[:, None]
        y_shift = torch.arange(0, self.pool_out_size[0] * self.sub_sample).cuda().expand(rois.size(0), -1) * h_step + \
                h_step / 2 + rois_in_features[:, 0][:, None]
        x_shift = torch.arange(0, self.pool_out_size[1] * self.sub_sample).cuda().expand(rois.size(0), -1) * w_step + \
                w_step / 2 + rois_in_features[:, 1][:, None]
        y_shift = y_shift.expand(self.pool_out_size[1] * self.sub_sample, -1,
                                 -1).permute(1, 2, 0)
        x_shift = x_shift.expand(self.pool_out_size[0] * self.sub_sample, -1,
                                 -1).permute(1, 0, 2)

        centers = torch.stack((y_shift, x_shift), dim=3)
        centers = centers.contiguous().view(-1,
                                            2)  # (N, H, W, 2) -> (N*H*W, 2)

        # bilinear interpolation
        loc_y = Variable(
            torch.frac(centers[:, 0].expand(feature_size[0], feature_size[1],
                                            -1)))
        loc_x = Variable(
            torch.frac(centers[:, 1].expand(feature_size[0], feature_size[1],
                                            -1)))

        ind_left = torch.floor(centers[:, 1]).long()
        ind_right = torch.ceil(centers[:, 1]).long()
        ind_up = torch.floor(centers[:, 0]).long()
        ind_down = torch.ceil(centers[:, 0]).long()

        pre_pool = features[:, :, ind_up, ind_left] * (1 - loc_y) * (1 - loc_x) + \
                   features[:, :, ind_down, ind_left] * loc_y * (1 - loc_x) + \
                   features[:, :, ind_up, ind_right] * (1 - loc_y) * loc_x + \
                   features[:, :, ind_down, ind_right] * loc_y * loc_x

        pre_pool = pre_pool.view(
            feature_size[0] * feature_size[1],
            rois.size()[0], self.pool_out_size[0] * self.sub_sample,
            self.pool_out_size[1] * self.sub_sample).permute(1, 0, 2, 3)
        max_pool = nn.MaxPool2d(kernel_size=self.sub_sample,
                                stride=self.sub_sample,
                                padding=0)
        post_pool = max_pool(pre_pool)

        return post_pool
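
The interpolation above hinges on torch.floor/torch.ceil giving the four neighbouring pixel indices and torch.frac giving the interpolation weights. A minimal self-contained sketch of that weighting on a single 2D feature map (the names feature and centers here are illustrative, not taken from the example):

import torch

feature = torch.arange(16.0).reshape(4, 4)           # toy (H, W) feature map
centers = torch.tensor([[1.25, 2.75], [0.5, 0.5]])   # fractional (y, x) sample points

y, x = centers[:, 0], centers[:, 1]
y0, y1 = torch.floor(y).long(), torch.ceil(y).long()
x0, x1 = torch.floor(x).long(), torch.ceil(x).long()
wy, wx = torch.frac(y), torch.frac(x)                # distance from the top-left neighbour

sampled = (feature[y0, x0] * (1 - wy) * (1 - wx) +
           feature[y1, x0] * wy * (1 - wx) +
           feature[y0, x1] * (1 - wy) * wx +
           feature[y1, x1] * wy * wx)
print(sampled)   # bilinearly interpolated values at the two sample points

When a sample coordinate is an exact integer, floor and ceil coincide and the frac weight is 0, so the expression reduces to the exact pixel value.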
Example 2
def test3():
    # get the exponential of a tensor
    torch.exp(x)  # compute the exponential of a tensor

    torch.frac(x)  # get the fractional part of each element, e.g. 9.25 -> 0.25

    torch.log(x)  # compute the log of the values in a tensor

    torch.pow(x, 2)  # to rectify the negative values, do a power transformation
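
The cells above rely on an x defined elsewhere; a self-contained version with a small float tensor (the value of x is only illustrative) is:

import torch

x = torch.tensor([0.5, 2.0, 9.25])
print(torch.exp(x))      # element-wise e**x
print(torch.frac(x))     # fractional part, e.g. 9.25 -> 0.25
print(torch.log(x))      # element-wise natural log
print(torch.pow(x, 2))   # element-wise square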
Example 3
    def forward(ctx,
                input,
                weight,
                bias=None,
                temporal="i",
                width=8,
                widtht=4,
                degree=2,
                delta=0,
                cycle_pos=16,
                cycle_neg=-16):
        ctx.save_for_backward(input, weight, bias)

        dtype = input.type()

        if temporal in ["i", "input"]:
            input_fp32 = input.detach().clone().type(torch.float)
            mantissa, exponent = torch.frexp(input_fp32)
            frac = torch.zeros_like(input_fp32)
            mantissa_new = torch.zeros_like(input_fp32)
        elif temporal in ["w", "weight"]:
            weight_fp32 = weight.detach().clone().type(torch.float)
            mantissa, exponent = torch.frexp(weight_fp32)
            frac = torch.zeros_like(weight_fp32)
            mantissa_new = torch.zeros_like(weight_fp32)

        mantissa = mantissa << width
        for i in range(degree):
            mantissa = mantissa >> widtht
            torch.frac(mantissa, out=frac)
            torch.trunc(mantissa, out=mantissa)
            torch.clamp(frac << widtht, cycle_neg + 1, cycle_pos - 1, out=frac)
            torch.add(frac >> widtht, mantissa_new >> widtht, out=mantissa_new)

        mantissa_new = mantissa_new << delta

        if temporal in ["i", "input"]:
            input_new = torch.ldexp(mantissa_new, exponent).type(dtype)
            weight_new = weight
        elif temporal in ["w", "weight"]:
            input_new = input
            weight_new = torch.ldexp(mantissa_new, exponent).type(dtype)

        output = torch.matmul(input_new, weight_new.t())

        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
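
The serial quantisation above leans on torch.frexp/torch.ldexp to split a float into mantissa and exponent and put it back together. A minimal round-trip check, independent of the Function above:

import torch

x = torch.tensor([0.15625, -3.5, 12.0])
mantissa, exponent = torch.frexp(x)      # x == mantissa * 2**exponent, |mantissa| in [0.5, 1)
print(mantissa, exponent)
print(torch.ldexp(mantissa, exponent))   # reconstructs x exactly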
Example 4
def vnoise_(x, freq=10, phase=0):
    x = x * freq + phase
    i = torch.floor(x)
    f = torch.frac(x)
    a = hash(i)
    b = hash(i+1)
    return torch.lerp(a, b, smoothstep(f))
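
In value-noise code like this, torch.floor selects the lattice cell and torch.frac gives the position inside the cell, which then drives the interpolation. A toy 1-D sketch with a made-up deterministic hash and the usual smoothstep (both placeholders, not the helpers used above):

import torch

def toy_hash(i):
    # deterministic pseudo-random value in (-1, 1) per lattice point (illustrative only)
    return torch.frac(torch.sin(i * 12.9898) * 43758.5453)

def smoothstep(t):
    return t * t * (3 - 2 * t)

x = torch.linspace(0, 1, 5) * 10   # sample positions at freq = 10
i = torch.floor(x)                 # lattice cell index
f = torch.frac(x)                  # position within the cell, in [0, 1)
print(torch.lerp(toy_hash(i), toy_hash(i + 1), smoothstep(f)))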
Example 5
def adjust_hue(input: torch.Tensor, hue_factor: float) -> torch.Tensor:
    r"""Adjust hue of an image.

    See :class:`~kornia.color.AdjustHue` for details.
    """

    if not torch.is_tensor(input):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not isinstance(hue_factor, float) or not -0.5 <= hue_factor <= 0.5:
        raise TypeError(
            f"The hue_factor should be a float number in the range"
            f" [-0.5, 0.5]. Got {type(hue_factor)}")

    # convert the rgb image to hsv
    x_hsv: torch.Tensor = rgb_to_hsv(input)

    # unpack the hsv values
    h, s, v = torch.chunk(x_hsv, chunks=3, dim=-3)

    # transform the hue value
    h_out: torch.Tensor = torch.frac(h * hue_factor)

    # pack back the corrected hue
    x_adjusted: torch.Tensor = torch.cat([h_out, s, v], dim=-3)

    # convert back to rgb
    out: torch.Tensor = hsv_to_rgb(x_adjusted)

    return out
Example 6
    def _create_target(self, outputs, org_targets):
        decoded_outputs = self._decode_outputs(outputs, copy=True)
        predicted_bboxes = decoded_outputs[..., :4].data

        batch_size, _, fsize, _, n_channels = outputs.shape
        dtype, device = outputs.dtype, outputs.device

        out_shape = outputs.shape[:4]
        target_mask = torch.zeros((*out_shape, n_channels - 1),
                                  dtype=dtype,
                                  device=device)
        object_mask = torch.ones(out_shape, dtype=dtype, device=device)
        target_scale = torch.zeros((*out_shape, 2), dtype=dtype, device=device)
        targets = torch.zeros_like(outputs)

        n_targets_all = (org_targets.sum(dim=2) > 0).sum(dim=1)

        org_targets = org_targets.clone()
        org_targets[:, :, :4] *= fsize
        org_grid_indices = org_targets[:, :, :2].int()

        for image_index in range(batch_size):
            n_targets = int(n_targets_all[image_index])
            if n_targets == 0:
                continue
            gt_grid_indices = org_grid_indices[image_index, :n_targets]

            best_anchor_indices = self._select_best_anchor_indices(
                org_targets[image_index][:n_targets, :4])
            is_iou_high_enough = self._check_if_iou_is_high_enough(
                predicted_bboxes[image_index][:n_targets],
                org_targets[image_index][:n_targets, :4])

            object_mask[image_index, :n_targets] = 1 - is_iou_high_enough

            for index, anchor_index in enumerate(best_anchor_indices):
                if anchor_index not in self.anchor_indices:
                    continue
                anchor_index = (
                    anchor_index == self.anchor_indices).nonzero().flatten()

                xindex, yindex = gt_grid_indices[index]
                object_mask[image_index, anchor_index, yindex, xindex] = 1
                target_mask[image_index, anchor_index, yindex, xindex, :] = 1
                targets[image_index, anchor_index, yindex,
                        xindex, :2] = torch.frac(org_targets[image_index,
                                                             index, :2])
                targets[image_index, anchor_index, yindex, xindex,
                        2:4] = torch.log(org_targets[image_index, index, 2:4] /
                                         self.anchors[anchor_index] + 1e-16)
                targets[image_index, anchor_index, yindex, xindex, 4] = 1
                targets[image_index, anchor_index, yindex, xindex,
                        5 + org_targets[image_index, index, 4].int()] = 1
                target_scale[image_index, anchor_index, yindex,
                             xindex, :] = torch.sqrt(
                                 2 - org_targets[image_index, index, 2] *
                                 org_targets[image_index, index, 3] / fsize /
                                 fsize)
        return targets, target_mask, object_mask, target_scale
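
In this YOLO-style target builder the ground-truth centres are scaled to grid units; their integer part (the .int() above) picks the grid cell, and torch.frac gives the offset of the centre inside that cell, which is what lands in targets[..., :2]. A minimal sketch of that decomposition (fsize and the centre values are illustrative):

import torch

fsize = 13                                             # grid size (illustrative)
centers = torch.tensor([[0.31, 0.62], [0.50, 0.08]])   # normalised ground-truth centres
centers_grid = centers * fsize                         # centres in grid units

print(centers_grid.int())         # grid cell indices
print(torch.frac(centers_grid))   # offsets inside the cell, in [0, 1)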
Example 7
def gnoise(x, freq=10, phase=0):
    x = x + phase
    x = x * freq
    i = torch.floor(x)
    f = torch.frac(x)
    a = hash(i)
    b = hash(i + 1)
    return torch.lerp(a*f, b*(1-f), quintic(f))
Example 8
def test_appro():
    print("Enter test_appro")
    a = torch.tensor([-3.5, -3.1415, -3., 0.0, 3., 3.1415, 3.5])
    print("orig:", a)
    print("floor: ", torch.floor(a))
    print("ceil: ", torch.ceil(a))
    print("trunc: ", torch.trunc(a))
    print("frac: ", torch.frac(a))
    print("round: ", torch.round(a))
    print("Exit test_appro")
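
As test_appro shows, torch.frac follows the sign of its input: frac(x) equals x - trunc(x), so -3.5 yields -0.5 rather than 0.5. A quick self-contained check:

import torch

a = torch.tensor([1.0, 2.5, -3.2])
print(torch.frac(a))        # tensor([ 0.0000,  0.5000, -0.2000])
print(a - torch.trunc(a))   # identical: frac(x) = x - trunc(x)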
Example 9
    def create_embedding_fn(self):
        embed_fns = []
        d = self.input_dims
        out_dim = 0
        if self.include_input:
            embed_fns.append(lambda x: x)
            out_dim += d

        for discr in range(self.num_discretization):
            embed_fns.append(lambda x, discr=discr: torch.floor(
                torch.frac(x * (self.basis**discr)) * self.basis) /
                             (self.basis - 1.0))
            out_dim += d

        self.embed_fns = embed_fns
        self.out_dim = out_dim
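
Each embedding function above reads off one digit of x in the given basis: multiplying by basis**discr moves the digits already consumed to the left of the radix point, torch.frac discards them, and floor(frac(...) * basis) extracts the next digit (the final division by basis - 1.0 just rescales it to [0, 1]). A small standalone check with basis = 10 (the names are illustrative):

import torch

basis = 10.0
x = torch.tensor([0.3721])

for discr in range(3):
    digit = torch.floor(torch.frac(x * basis**discr) * basis)
    print(int(digit))   # prints 3, then 7, then 2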
Example 10
    def forward(ctx, input, prec):
        """
        Forward
        :param ctx:
        :param input:
        :param prec:
        :return:
        """
        gpuavail = torch.cuda.is_available()
        device = torch.device(gl_cuda_device if gpuavail else "cpu")

        ctx.save_for_backward(input)

        output = (input / prec - torch.frac(input / prec)) * prec

        return output
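
Subtracting torch.frac(input / prec) from input / prec is just truncation, so this forward pass quantises input to a multiple of prec, rounding toward zero; it is equivalent to prec * trunc(input / prec). A quick check of that equivalence:

import torch

x = torch.tensor([0.37, -0.81, 1.26])
prec = 0.25

q1 = (x / prec - torch.frac(x / prec)) * prec   # formulation used in the forward above
q2 = prec * torch.trunc(x / prec)               # equivalent formulation
print(q1)   # tensor([ 0.2500, -0.7500,  1.2500])
print(q2)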
Example 11
def adjust_hue(input: torch.Tensor,
               hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
    r"""Adjust hue of an image.

    See :class:`~kornia.color.AdjustHue` for details.
    """

    if not torch.is_tensor(input):
        raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")

    if not isinstance(hue_factor, (float, torch.Tensor)):
        raise TypeError(
            f"The hue_factor should be a float number or torch.Tensor in the range between"
            f" [-0.5, 0.5]. Got {type(hue_factor)}")

    if isinstance(hue_factor, float):
        hue_factor = torch.tensor([hue_factor])

    hue_factor = hue_factor.to(input.device).to(input.dtype)

    if ((hue_factor < -0.5) | (hue_factor > 0.5)).any():
        raise ValueError(
            f"Hue-factor must be in the range [-0.5, 0.5]. Got {hue_factor}")

    for _ in input.shape[1:]:
        hue_factor = torch.unsqueeze(hue_factor, dim=-1)

    # convert the rgb image to hsv
    x_hsv: torch.Tensor = rgb_to_hsv(input)

    # unpack the hsv values
    h, s, v = torch.chunk(x_hsv, chunks=3, dim=-3)

    # transform the hue value
    h_out: torch.Tensor = torch.frac(h + hue_factor)

    # pack back the corrected hue
    x_adjusted: torch.Tensor = torch.cat([h_out, s, v], dim=-3)

    # convert back to rgb
    out: torch.Tensor = hsv_to_rgb(x_adjusted)

    return out
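
Here torch.frac performs the wrap-around of the shifted hue channel: for a positive hue_factor, values that pass 1 come back to the start of the range (assuming the hue channel is normalised to [0, 1); a negative shift would need an extra wrap, since frac keeps the sign of its input). A standalone check:

import torch

h = torch.tensor([0.10, 0.55, 0.95])   # hue values in [0, 1)
hue_factor = 0.2
print(torch.frac(h + hue_factor))      # tensor([0.3000, 0.7500, 0.1500])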
Example 12
    def forward(self, x: torch.Tensor):

        # Cast the input to the correct data type
        x = x.type(torch.FloatTensor)
        if torch.cuda.is_available():
            x = x.cuda()

        # Add batch and channel dimensions so the tensor matches the expected input shape
        x = torch.unsqueeze(torch.unsqueeze(x, dim=0), dim=0)

        # Check if the input tensor x has the correct number of dimensions
        # The input tensor must be of shape [batch, ch_in, iT, iH, iW]
        if x.ndim != 5:
            raise ValueError(
                f'The input tensor needs to be 5 dimensional, but has {x.ndim} dimensions!'
            )
        if x.shape[1] != 1:
            raise ValueError(
                f'The number of input channels is not correct ({x.shape[1]} instead of 1)!'
            )

        # Convolve the input tensor with the weights and remove the first dimension
        offset = self.block_size // 2 + 1
        padding = [offset, offset, offset]
        out = F.conv3d(x, self.weight, None, self.stride, padding=padding)
        out = torch.squeeze(out, dim=0)  # - torch.Size([3, 195, 231, 195])

        # the convolved arrays along z, y, x are out[0], out[1], out[2]

        # cord_z, cord_y, cord_x are the starting window coordinates of the padded image
        # and correspond to the central window coordinates of the original image

        # ------------------------------------
        eps = sys.float_info.epsilon
        with torch.no_grad():

            #   magnitude
            mag = out.norm(p="fro", dim=0)

            #   theta
            theta = torch.atan2(out[1], out[2])

            # phi
            phi = torch.acos(torch.div(out[0], mag + eps))
            mag = torch.unsqueeze(torch.unsqueeze(mag, dim=0), dim=0)
            theta = torch.unsqueeze(torch.unsqueeze(theta, dim=0), dim=0)
            phi = torch.unsqueeze(torch.unsqueeze(phi, dim=0), dim=0)

            # Binning Mag with linear interpolation
            theta_raw_ind = (theta / self.max_phi_angle * self.phi_bins)
            theta_frac_ind = torch.frac(theta_raw_ind)

            phi_raw_ind = (phi / self.max_phi_angle * self.phi_bins)
            phi_frac_ind = torch.frac(phi_raw_ind)

            # --------------------------
            # create a tensor containing the lower and upper indices for theta and phi (4 channels),
            # laid out as [theta lower ind, theta upper ind, phi lower ind, phi upper ind]

            conv, d, h, w = out.size()
            int_indices = torch.zeros(
                4, d, h, w, dtype=torch.int64,
                device=x.device)  # torch.Size([4, 195, 231, 195])
            int_indices = torch.unsqueeze(
                int_indices, 0)  # torch.Size([1, 4, 195, 231, 195])

            # theta indices
            #   lower theta indices (0)
            int_indices[
                0, 0, :, :, :] = theta_raw_ind.floor().long() % self.theta_bins
            #   upper theta indices (1)
            int_indices[
                0, 1, :, :, :] = theta_raw_ind.ceil().long() % self.theta_bins

            # phi indices
            #   lower phi indices   (2)
            int_indices[
                0, 2, :, :, :] = phi_raw_ind.floor().long() % self.phi_bins
            #   upper phi indices   (3)
            int_indices[0,
                        3, :, :, :] = phi_raw_ind.ceil().long() % self.phi_bins

            # int_indices = torch.unsqueeze(int_indices, dim=0)

            # convert int indices to int64

            # -------------------------- create a tensor containing the "fractional parts" (%) for the lower and
            # upper indices of theta and phi (4 channels),
            # laid out as [% theta, 1 - % theta, % phi, 1 - % phi]

            frac_parts = torch.zeros(int_indices.size(), device=x.device)

            # theta fractions
            #   lower theta         (0)
            frac_parts[:, 0, :, :, :] = torch.abs(theta_frac_ind)
            #   upper theta         (1)
            frac_parts[:, 1, :, :, :] = torch.abs(1 - theta_frac_ind)

            # phi fractions
            #   lower phi           (2)
            frac_parts[:, 2, :, :, :] = torch.abs(phi_frac_ind)
            #   upper phi           (3)
            frac_parts[:, 3, :, :, :] = torch.abs(1 - phi_frac_ind)

            # -------------------------- create a tensor containing the "composed fractional parts" (%) for the
            # lower and upper indices of theta and phi (4 channels), laid out as:
            # [(% theta) x (% phi),   (% theta) x (1 - % phi),  (1 - % theta) x (% phi),  (1 - % theta) x (1 - % phi)]

            composed_frac_parts = torch.zeros(int_indices.size(),
                                              device=x.device)

            # theta
            #  (% theta) x (% phi)          (0)
            composed_frac_parts[:, 0, :, :, :] = torch.mul(
                frac_parts[0, 0, :, :, :], frac_parts[0, 2, :, :, :])
            #  (% theta) x (1 - % phi)      (1)
            composed_frac_parts[:, 1, :, :, :] = torch.mul(
                frac_parts[0, 0, :, :, :], frac_parts[0, 3, :, :, :])

            # phi indices
            #  (1 - % theta) x (% phi)      (2)
            composed_frac_parts[:, 2, :, :, :] = torch.mul(
                frac_parts[0, 1, :, :, :], frac_parts[0, 2, :, :, :])
            #  (1 - % theta) x (1 - % phi)  (3)
            composed_frac_parts[:, 3, :, :, :] = torch.mul(
                frac_parts[0, 1, :, :, :], frac_parts[0, 3, :, :, :])

            # ---------- scatter the composed fractions to the right angle bins (theta or phi)
            # create tensors holding the theta or phi bins (1)
            n, c, d, h, w = x.size()

            theta_lower_ind_fracs_f_0 = torch.zeros(
                (1, self.theta_bins, d + 2 * offset - 2, h + 2 * offset - 2,
                 w + 2 * offset - 2),
                dtype=torch.float,
                device=x.device)

            theta_lower_ind_fracs_f_1 = torch.zeros(
                theta_lower_ind_fracs_f_0.size(), device=x.device)

            theta_upper_ind_fracs_f_2 = torch.zeros(
                theta_lower_ind_fracs_f_0.size(), device=x.device)

            theta_upper_ind_fracs_f_3 = torch.zeros(
                theta_lower_ind_fracs_f_0.size(), device=x.device)

            # phi_upper_ind = torch.zeros(phi_upper_ind.size())
            # theta_lower_indices # torch.Size([1, theta_bins (1), 195, 231, 195])

            # composed fracs to be organized

            low_p_ordered_by_low_t = torch.zeros(
                (1, self.theta_bins, d + 2 * offset - 2, h + 2 * offset - 2,
                 w + 2 * offset - 2),
                dtype=torch.int64,
                device=x.device)

            upper_p_ordered_by_low_t = torch.zeros(
                low_p_ordered_by_low_t.size(),
                dtype=torch.int64,
                device=x.device)

            lower_p_ordered_by_upper_t = torch.zeros(
                low_p_ordered_by_low_t.size(),
                dtype=torch.int64,
                device=x.device)

            upper_p_ordered_by_upper_t = torch.zeros(
                low_p_ordered_by_low_t.size(),
                dtype=torch.int64,
                device=x.device)

            # here we got a set of fractions scattered through the different lower theta indices
            int_indices = torch.unsqueeze(int_indices, dim=0)

            theta_lower_ind_fracs_f_0.scatter_(
                1, int_indices[:, :, 0, :, :, :],
                torch.mul(composed_frac_parts[:, 0, :, :, :], mag))

            theta_lower_ind_fracs_f_1.scatter_(
                1, int_indices[:, :, 0, :, :, :],
                torch.mul(composed_frac_parts[:, 1, :, :, :], mag))

            theta_upper_ind_fracs_f_2.scatter_(
                1, int_indices[:, :, 1, :, :, :],
                torch.mul(composed_frac_parts[:, 2, :, :, :], mag))

            theta_upper_ind_fracs_f_3.scatter_(
                1, int_indices[:, :, 1, :, :, :],
                torch.mul(composed_frac_parts[:, 3, :, :, :], mag))

            #                                lower theta indices (0) #   lower phi indices   (2)
            low_p_ordered_by_low_t.scatter_(1, int_indices[:, :, 0, :, :, :],
                                            int_indices[:, :, 2, :, :, :])
            low_p_ordered_by_low_t = torch.unsqueeze(low_p_ordered_by_low_t,
                                                     dim=0)
            low_p_ordered_by_low_t = torch.transpose(low_p_ordered_by_low_t, 1,
                                                     2)

            #                                lower theta indices (0) #   upper phi indices   (3)
            upper_p_ordered_by_low_t.scatter_(1, int_indices[:, :, 0, :, :, :],
                                              int_indices[:, :, 3, :, :, :])
            upper_p_ordered_by_low_t = torch.unsqueeze(
                upper_p_ordered_by_low_t, dim=0)
            upper_p_ordered_by_low_t = torch.transpose(
                upper_p_ordered_by_low_t, 1, 2)

            #                                upper theta indices (1) #   lower phi indices   (2)
            lower_p_ordered_by_upper_t.scatter_(1, int_indices[:, :,
                                                               1, :, :, :],
                                                int_indices[:, :, 2, :, :, :])
            lower_p_ordered_by_upper_t = torch.unsqueeze(
                lower_p_ordered_by_upper_t, dim=0)
            lower_p_ordered_by_upper_t = torch.transpose(
                lower_p_ordered_by_upper_t, 1, 2)

            #                                upper theta indices (1) #   upper phi indices   (3)
            upper_p_ordered_by_upper_t.scatter_(1, int_indices[:, :,
                                                               1, :, :, :],
                                                int_indices[:, :, 3, :, :, :])
            upper_p_ordered_by_upper_t = torch.unsqueeze(
                upper_p_ordered_by_upper_t, dim=0)
            upper_p_ordered_by_upper_t = torch.transpose(
                upper_p_ordered_by_upper_t, 1, 2)

            theta_lower_ind_fracs_f_0 = torch.unsqueeze(
                theta_lower_ind_fracs_f_0, dim=0)
            theta_lower_ind_fracs_f_0 = torch.transpose(
                theta_lower_ind_fracs_f_0, 1, 2)

            theta_lower_ind_fracs_f_1 = torch.unsqueeze(
                theta_lower_ind_fracs_f_1, dim=0)
            theta_lower_ind_fracs_f_1 = torch.transpose(
                theta_lower_ind_fracs_f_1, 1, 2)

            theta_upper_ind_fracs_f_2 = torch.unsqueeze(
                theta_upper_ind_fracs_f_2, dim=0)
            theta_upper_ind_fracs_f_2 = torch.transpose(
                theta_upper_ind_fracs_f_2, 1, 2)

            theta_upper_ind_fracs_f_3 = torch.unsqueeze(
                theta_upper_ind_fracs_f_3, dim=0)
            theta_upper_ind_fracs_f_3 = torch.transpose(
                theta_upper_ind_fracs_f_3, 1, 2)

            # freeing up unused tensors from memory
            n, c, d, h, w = x.shape
            del out
            del x
            del int_indices
            del composed_frac_parts
            del mag
            del phi
            del theta
            torch.cuda.empty_cache()

            t = torch.cuda.get_device_properties(0).total_memory
            c = torch.cuda.memory_reserved(0)  # memory_cached() is deprecated in recent PyTorch
            a = torch.cuda.memory_allocated(0)
            f = c - a  # free inside the reserved cache

            # bin assignment
            out_plus_bins = torch.zeros(  # torch.Size([1, 8, 8, 195, 231, 195])
                (n, self.theta_bins, self.phi_bins, d + 2 * offset - 2,
                 h + 2 * offset - 2, w + 2 * offset - 2),
                dtype=torch.float,
                device=theta_upper_ind_fracs_f_3.device)

            # assigning low t x low p
            out_plus_bins.scatter_(2, low_p_ordered_by_low_t,
                                   theta_lower_ind_fracs_f_0)
            out_plus_bins.scatter_add_(2, upper_p_ordered_by_low_t,
                                       theta_lower_ind_fracs_f_1)
            out_plus_bins.scatter_add_(2, lower_p_ordered_by_upper_t,
                                       theta_upper_ind_fracs_f_2)
            out_plus_bins.scatter_add_(2, upper_p_ordered_by_upper_t,
                                       theta_upper_ind_fracs_f_3)

            out_plus_bins = torch.reshape(
                out_plus_bins,
                (n, self.theta_bins * self.phi_bins, d + 2 * offset - 2,
                 h + 2 * offset - 2, w + 2 * offset - 2))

            return self.pooler(out_plus_bins)
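
The orientation-binning part of this layer is a soft (linearly interpolated) histogram: the integer part of the scaled angle picks the two neighbouring bins and torch.frac splits the magnitude between them. A much smaller 1-D sketch of the same idea, for a single set of angles (the bin count and values are illustrative):

import math
import torch

n_bins = 8
angles = torch.tensor([0.3, 2.0, 5.9])   # angles in radians, in [0, 2*pi)
mags = torch.tensor([1.0, 2.0, 0.5])     # magnitudes to distribute

raw = angles / (2 * math.pi) * n_bins    # fractional bin index
lower = raw.floor().long() % n_bins
upper = raw.ceil().long() % n_bins
w_upper = torch.frac(raw)                # share of the magnitude going to the upper bin

hist = torch.zeros(n_bins)
hist.scatter_add_(0, lower, mags * (1 - w_upper))
hist.scatter_add_(0, upper, mags * w_upper)
print(hist)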
Example 13
 def test_frac(x, y):
     c = torch.frac(torch.add(x, y))
     return c
Example 14
 def pointwise_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
     f = torch.zeros(3)
     g = torch.tensor([-1, 0, 1])
     w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
     return (
         torch.abs(torch.tensor([-1, -2, 3])),
         torch.absolute(torch.tensor([-1, -2, 3])),
         torch.acos(a),
         torch.arccos(a),
         torch.acosh(a.uniform_(1.0, 2.0)),
         torch.add(a, 20),
         torch.add(a, torch.randn(4, 1), alpha=10),
         torch.addcdiv(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.addcmul(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.angle(a),
         torch.asin(a),
         torch.arcsin(a),
         torch.asinh(a),
         torch.arcsinh(a),
         torch.atan(a),
         torch.arctan(a),
         torch.atanh(a.uniform_(-1.0, 1.0)),
         torch.arctanh(a.uniform_(-1.0, 1.0)),
         torch.atan2(a, a),
         torch.bitwise_not(t),
         torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.ceil(a),
         torch.clamp(a, min=-0.5, max=0.5),
         torch.clamp(a, min=0.5),
         torch.clamp(a, max=0.5),
         torch.clip(a, min=-0.5, max=0.5),
         torch.conj(a),
         torch.copysign(a, 1),
         torch.copysign(a, b),
         torch.cos(a),
         torch.cosh(a),
         torch.deg2rad(
             torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0,
                                                              -90.0]])),
         torch.div(a, b),
         torch.divide(a, b, rounding_mode="trunc"),
         torch.divide(a, b, rounding_mode="floor"),
         torch.digamma(torch.tensor([1.0, 0.5])),
         torch.erf(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
         torch.exp(torch.tensor([0.0, math.log(2.0)])),
         torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
         torch.expm1(torch.tensor([0.0, math.log(2.0)])),
         torch.fake_quantize_per_channel_affine(
             torch.randn(2, 2, 2),
             (torch.randn(2) + 1) * 0.05,
             torch.zeros(2),
             1,
             0,
             255,
         ),
         torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
         torch.float_power(torch.randint(10, (4, )), 2),
         torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4,
                                                             -5])),
         torch.floor(a),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
         torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
         torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.frac(torch.tensor([1.0, 2.5, -3.2])),
         torch.randn(4, dtype=torch.cfloat).imag,
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
         torch.lerp(torch.arange(1.0, 5.0),
                    torch.empty(4).fill_(10), 0.5),
         torch.lerp(
             torch.arange(1.0, 5.0),
             torch.empty(4).fill_(10),
             torch.full_like(torch.arange(1.0, 5.0), 0.5),
         ),
         torch.lgamma(torch.arange(0.5, 2, 0.5)),
         torch.log(torch.arange(5) + 10),
         torch.log10(torch.rand(5)),
         torch.log1p(torch.randn(5)),
         torch.log2(torch.rand(5)),
         torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([-100.0, -200.0, -300.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([1.0, 2000.0, 30000.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-100.0, -200.0, -300.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([1.0, 2000.0, 30000.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logical_and(r, s),
         torch.logical_and(r.double(), s.double()),
         torch.logical_and(r.double(), s),
         torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
         torch.logical_not(
             torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
         torch.logical_not(
             torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
             out=torch.empty(3, dtype=torch.int16),
         ),
         torch.logical_or(r, s),
         torch.logical_or(r.double(), s.double()),
         torch.logical_or(r.double(), s),
         torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_xor(r, s),
         torch.logical_xor(r.double(), s.double()),
         torch.logical_xor(r.double(), s),
         torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logit(torch.rand(5), eps=1e-6),
         torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
         torch.i0(torch.arange(5, dtype=torch.float32)),
         torch.igamma(a, b),
         torch.igammac(a, b),
         torch.mul(torch.randn(3), 100),
         torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
         torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
         torch.tensor([float("nan"),
                       float("inf"), -float("inf"), 3.14]),
         torch.nan_to_num(w),
         torch.nan_to_num(w, nan=2.0),
         torch.nan_to_num(w, nan=2.0, posinf=1.0),
         torch.neg(torch.randn(5)),
         # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
         torch.polygamma(1, torch.tensor([1.0, 0.5])),
         torch.polygamma(2, torch.tensor([1.0, 0.5])),
         torch.polygamma(3, torch.tensor([1.0, 0.5])),
         torch.polygamma(4, torch.tensor([1.0, 0.5])),
         torch.pow(a, 2),
         torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
         torch.rad2deg(
             torch.tensor([[3.142, -3.142], [6.283, -6.283],
                           [1.570, -1.570]])),
         torch.randn(4, dtype=torch.cfloat).real,
         torch.reciprocal(a),
         torch.remainder(torch.tensor([-3.0, -2.0]), 2),
         torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.round(a),
         torch.rsqrt(a),
         torch.sigmoid(a),
         torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sgn(a),
         torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sin(a),
         torch.sinc(a),
         torch.sinh(a),
         torch.sqrt(a),
         torch.square(a),
         torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
         torch.tan(a),
         torch.tanh(a),
         torch.trunc(a),
         torch.xlogy(f, g),
         torch.xlogy(f, g),
         torch.xlogy(f, 4),
         torch.xlogy(2, g),
     )
Example 15
    def forward(ctx,
                input,
                weight,
                bias=None,
                temporal="i",
                width=8,
                widtht=4,
                degree=2,
                delta=0,
                cycle_pos=16,
                cycle_neg=-16,
                rounding="round",
                quantilei=1,
                quantilew=1):
        ctx.save_for_backward(input, weight, bias)
        input_fp32 = input.detach().clone().to(torch.float)
        weight_fp32 = weight.detach().clone().to(torch.float)

        rshift_i, rshift_w, _ = rshift_offset(input_fp32, weight_fp32, width,
                                              width, rounding, quantilei,
                                              quantilew)

        if temporal in ["i", "input"]:
            input_new = torch.zeros_like(input_fp32)
            frac = torch.zeros_like(input_fp32)
            torch.trunc((input_fp32 >> rshift_i).clamp(-2**width + 1,
                                                       2**width - 1),
                        out=input_fp32)
            for i in range(degree):
                input_fp32 = input_fp32 >> widtht
                torch.frac(input_fp32, out=frac)
                torch.trunc(input_fp32, out=input_fp32)
                torch.clamp(frac << widtht,
                            cycle_neg + 1,
                            cycle_pos - 1,
                            out=frac)
                torch.add(frac >> widtht, input_new >> widtht, out=input_new)
            input_new = (input_new <<
                         (delta + width + rshift_i)).type(weight.type())
            weight_new = weight
        elif temporal in ["w", "weight"]:
            weight_new = torch.zeros_like(weight_fp32)
            frac = torch.zeros_like(weight_fp32)
            torch.trunc(
                (weight_fp32 >> rshift_w).clamp(-2**width + 1, 2**width - 1),
                out=weight_fp32)
            for i in range(degree):
                weight_fp32 = weight_fp32 >> widtht
                torch.frac(weight_fp32, out=frac)
                torch.trunc(weight_fp32, out=weight_fp32)
                torch.clamp(frac << widtht,
                            cycle_neg + 1,
                            cycle_pos - 1,
                            out=frac)
                torch.add(frac >> widtht, weight_new >> widtht, out=weight_new)
            input_new = input
            weight_new = (weight_new <<
                          (delta + width + rshift_w)).type(input.type())

        output = torch.matmul(input_new, weight_new.t())

        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
Example 16
# In[96]:


# how to get the fractional portion of each element


# In[97]:


torch.add(x,10)


# In[98]:


torch.frac(torch.add(x,10))


# In[99]:


# compute the log of the values in a tensor


# In[100]:


x


# In[101]:
Example 17
torch.float_power(torch.randint(10, (4, )), 2)
torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5]))

# floor
torch.floor(a)

# floor_divide
torch.floor_divide(torch.tensor([4., 3.]), torch.tensor([2., 2.]))
torch.floor_divide(torch.tensor([4., 3.]), 1.4)

# fmod
torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5)

# frac
torch.frac(torch.tensor([1, 2.5, -3.2]))

# imag
torch.randn(4, dtype=torch.cfloat).imag

# ldexp
torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))

# lerp
start = torch.arange(1., 5.)
end = torch.empty(4).fill_(10)
torch.lerp(start, end, 0.5)
torch.lerp(start, end, torch.full_like(start, 0.5))

# lgamma
Example 18
# Clamp each element of the input tensor to the range [min, max] and return the result in a new tensor
torch.clamp(a, min=0, max=1)
torch.clamp(a, min=0)  # only constrain the lower bound

# Divide each element of input by the scalar value and return the result in the output tensor out
torch.div(a, 0.1)
torch.div(a, torch.randn(4, 4))  # element-wise division of two matrices

# Exponential: return a new tensor containing the exponential of each element of input
torch.exp(a)

# Compute the remainder of division
torch.fmod(torch.Tensor([1, 2, 3]), 2)

# Return the fractional part of each element
torch.frac(torch.Tensor([1, 2.4, -3.5]))

# Natural logarithm
torch.log(torch.Tensor([1, 5, 10]))

# Compute the natural log of input + 1: y_i = log(x_i + 1)
torch.log1p(torch.Tensor([1, 5, 10]))

# Multiplication: multiply each element of input by a scalar value and return a new result tensor
torch.mul(a, 2)

# Negate each element
torch.neg(torch.Tensor([-1, 2, -3]))

# Raise to the n-th power
torch.pow(x, 2)
Example 19
 t = torch.randn(1, 3)
 t1 = torch.randn(3, 1)
 t2 = torch.randn(1, 3)
 print(x)
 print(x.abs())
 print(x.neg())
 print(x.acos())
 print(torch.ceil(x))
 print(torch.floor(x))
 print(torch.round(x))
 # print(torch.diff(x))
 print(torch.clamp(x, min=-0.5, max=0.5))
 print(torch.clamp(x, min=0.5))
 print(torch.clamp(x, max=0.5))
 print(torch.trunc(x))  # truncated integer values
 print(torch.frac(x))  # fractional portion of each element
 print(x.add(1))
 print(torch.exp(x))
 print(torch.expm1(x))
 print(torch.logit(x))
 print(torch.mul(x, 100))
 print(torch.addcdiv(t, t1, t2, value=0.1))  # t + value * t1 / t2
 print(torch.addcmul(t, t1, t2, value=0.1))  # t + value * t1 * t2
 print(torch.addmm(M, mat1, mat2))  # beta * M + alpha * mat1 * mat2
 print(torch.matmul(mat1, mat2))  # mat1 * mat2
 print(torch.mm(mat1, mat2))  # mat1 * mat2
 print(torch.matrix_power(mat1, 2))  # mat1 * mat1
 print(torch.addmv(x, mat1, x))  # β x+α (mat * x)
 print(torch.mv(mat1, x))  # mat * vec
 print(torch.outer(x, x))  # vec1⊗vec2
 print(torch.renorm(mat1, 1, 0, 5))
Example 20
t.acos(input, out=None)  # returns a tensor with the arccosine of each element
t.add(input, value, out=None)  # returns a tensor with value added to each element
#t.addcdiv(input, value=1, tensor1, tensor2, out=None)   # element-wise divide tensor1 by tensor2, multiply by the scalar value and add the result to input
#t.addcmul(input, value=1, tensor1, tensor2, out=None)   # element-wise multiply tensor1 by tensor2, scale by the scalar value and add to input. The shapes need not match, but the element counts must; if input is a FloatTensor or DoubleTensor, value must be a real number, otherwise an integer.
t.asin(input, out=None)  # arcsine
t.atan(input, out=None)  # arctangent
#t.atan2(input1, input2, out=None)      # returns a new tensor with the element-wise arctangent of input1 and input2
t.ceil(input, out=None)  # ceiling: round each element of input up to the smallest integer not less than it
#t.clamp(input, min, max, out=None)     # clamp each element of input to the range [min, max] and return a new tensor
t.cos(input, out=None)
t.cosh(input, out=None)
t.div(input, value, out=None)  # divide each element of input by the scalar value and return the result in the output tensor out
t.exp(input, out=None)
t.floor(input, out=None)  # floor: return a new tensor with the largest integer not greater than each element
#t.fmod(input, divisor, out=None)   # remainder of division; divisor and dividend may be integers or floats; the remainder has the same sign as the dividend
t.frac(input, out=None)  # return the fractional part of each element
#t.lerp(start, end, weight, out=None)   # linear interpolation between the two tensors start and end; the result goes to the output tensor
t.log(input, out=None)
t.log1p(input, out=None)  # compute the natural log of input + 1: y_i = log(x_i + 1)
t.mul(input, value,
      out=None)  # multiply each element of input by the scalar value and return a new result tensor: out = tensor * value
t.neg(input, out=None)  # return a new tensor with each element of input negated: out = -1 * input
#t.pow(input, exponent, out=None)   # raise each element of input to the power exponent; exponent may be a single float or a tensor with the same number of elements as input
t.reciprocal(input, out=None)  # return a new tensor with the reciprocal of each element, i.e. 1.0 / x
#t.remainder(input, divisor, out=None)  # return a new tensor with the element-wise remainder of division; divisor and dividend may be integers or floats; the remainder has the same sign as the divisor
t.round(input, out=None)  # return a new tensor with each element rounded to the nearest integer
t.rsqrt(input, out=None)  # return a new tensor with the reciprocal of the square root of each element
t.sigmoid(input, out=None)  # return a new tensor with the sigmoid of each element
t.sign(input, out=None)  # sign function: return a new tensor with the sign of each element
t.sin(input, out=None)
t.sinh(input, out=None)
Example 21
#%%
tp = torch.pow(torch.arange(1, 4), torch.arange(3))
print('pow = {}'.format(tp))
te = torch.exp(torch.tensor([0.1, -0.01]))
print('exp = {}'.format(te))
ts = torch.sin(torch.tensor([
    [
        3.14 / 4,
    ],
]))
print('sin = {}'.format(ts))

#%%
t5 = torch.arange(5)
tf = torch.frac(t5 * 0.3)
print('frac = {}'.format(tf))
tc = torch.clamp(t5, 0.5, 3.5)
print('clamp = {}'.format(tc))

#%% [markdown]
# Tensor concatenation

#%%
tp = torch.arange(12).reshape(3, 4)
tn = -tp
tc0 = torch.cat([tp, tn], 0)
print('tc0 = {}'.format(tc0))
tc1 = torch.cat([tp, tp, tn, tn], 1)
print('tc1 = {}'.format(tc1))