Example #1
    def __call__(self, img):
        shape = img.shape

        # coloured noise: draw an independent mask for every colour channel
        if self.colored:
            salt_mask = torch.trunc(self.amount_salt * torch.ones(shape) +
                                    torch.rand(shape))
            # assumes the transform also stores self.amount_pepper (the
            # original reused self.amount_salt here, a likely copy-paste slip)
            pepper_mask = torch.trunc(self.amount_pepper * torch.ones(shape) +
                                      torch.rand(shape))
        # black-and-white noise: draw one mask and copy it to every channel
        else:
            shape = (1, shape[1], shape[2])
            salt_mask = torch.trunc(self.amount_salt * torch.ones(shape) +
                                    torch.rand(shape))
            pepper_mask = torch.trunc(self.amount_pepper * torch.ones(shape) +
                                      torch.rand(shape))
            # we need to expand the maps to every channel
            salt_mask = torch.cat((salt_mask, salt_mask, salt_mask), dim=0)
            pepper_mask = torch.cat((pepper_mask, pepper_mask, pepper_mask),
                                    dim=0)

        # apply salt: set masked pixels to the maximum value (img assumed in [0, 1]);
        # masked_fill_ writes positionally, which is what the original
        # masked_scatter_ call (whose source is read in flattened order) intended
        img.masked_fill_(salt_mask.bool(), 1.0)

        # apply pepper: set masked pixels to the minimum value
        img.masked_fill_(pepper_mask.bool(), 0.0)

        return img
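The trunc(p + U[0, 1)) construction above is a Bernoulli(p) draw in disguise: truncation yields 1 exactly when the uniform sample is at least 1 - p. A minimal standalone check (p and shape are illustrative):

import torch

p = 0.05                                  # noise amount, illustrative
shape = (3, 64, 64)
mask = torch.trunc(p * torch.ones(shape) + torch.rand(shape))
print(mask.mean())                        # approximately p
# the dedicated API draws the same distribution
mask2 = torch.bernoulli(torch.full(shape, p))
print(mask2.mean())                       # also approximately p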
Example #2
    def devoxelize(self, points, bandwidth):
        # points = points.cuda()
        try:
            x = points[:, 0]
        except IndexError:
            print(points.shape[0], points.shape[1])
            raise  # re-raise: the indexing below would fail anyway

        y = points[:, 1]
        z = points[:, 2]

        # First compute the centroid, then translate the points so it sits at the origin
        centroid_x = torch.sum(x) / points.shape[0]
        centroid_y = torch.sum(y) / points.shape[0]
        centroid_z = torch.sum(z) / points.shape[0]
        centroid = torch.tensor([centroid_x, centroid_y, centroid_z])
        points = points.float()
        points -= centroid.cuda()

        # After centring, compute each point's distance from the origin (its radius)

        radius = torch.sqrt(points[..., 0]**2 + points[..., 1]**2 +
                            points[..., 2]**2)

        # print(points.size())
        # print(radius.size())

        radius = radius.unsqueeze(2)
        radius = radius.repeat(1, 1, 3)
        points_on_sphere = points / radius
        # ssgrid = sgrid.reshape(-1, 3)
        # phi, theta = S2.change_coordinates(ssgrid, p_from='C', p_to='S')
        out = self.change_coordinates(points_on_sphere, p_from='C', p_to='S')

        phi = torch.tensor(out[..., 0]).cuda()
        theta = torch.tensor(out[..., 1]).cuda()
        pi = torch.acos(
            torch.zeros(1)).item() * 2  # which is 3.1415927410125732
        theta = theta % (pi * 2)

        b = bandwidth  # bandwidth
        # By computing the m,n, we can find
        # the neighbours on the sphere
        m = torch.trunc((phi - pi / (4 * b)) / (pi / (2 * b)))
        m = m.long()
        n = torch.trunc(theta / (pi / b))
        n = n.long()
        # need to mind the boundary issues
        m_boundary = m >= 2 * b
        n_boundary = n >= 2 * b
        m[m_boundary] = 2 * b - 1
        n[n_boundary] = 2 * b - 1
        # print(m.max())
        # print(m.min())
        # print(n.max())
        # print(n.min())
        # print("happy devoxelizing"+str(torch.cuda.current_device()))

        return m.cuda(), n.cuda()
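The m/n computation above bins spherical coordinates onto a 2b x 2b equiangular grid. A small sketch of the same binning on hand-picked values (b is illustrative; the lower clamp is added defensively for phi near the pole):

import torch

b = 4                                            # bandwidth, illustrative
pi = torch.acos(torch.zeros(1)).item() * 2
phi = torch.tensor([0.1, 1.5, 3.0])              # colatitudes in [0, pi]
theta = torch.tensor([0.2, 3.5, 6.0])            # azimuths in [0, 2*pi)
m = torch.trunc((phi - pi / (4 * b)) / (pi / (2 * b))).long().clamp(0, 2 * b - 1)
n = torch.trunc(theta / (pi / b)).long().clamp(0, 2 * b - 1)
print(m, n)                                      # grid indices in [0, 2b)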
Example #3
    def forward(ctx,
                input,
                weight,
                bias=None,
                temporal="i",
                width=8,
                widtht=4,
                degree=2,
                delta=0,
                cycle_pos=16,
                cycle_neg=-16):
        ctx.save_for_backward(input, weight, bias)

        dtype = input.type()

        if temporal in ["i", "input"]:
            input_fp32 = input.detach().clone().type(torch.float)
            mantissa, exponent = torch.frexp(input_fp32)
            frac = torch.zeros_like(input_fp32)
            mantissa_new = torch.zeros_like(input_fp32)
        elif temporal in ["w", "weight"]:
            weight_fp32 = weight.detach().clone().type(torch.float)
            mantissa, exponent = torch.frexp(weight_fp32)
            frac = torch.zeros_like(weight_fp32)
            mantissa_new = torch.zeros_like(weight_fp32)

        # note: on these float tensors '<<' / '>>' scale by powers of two
        # (multiply / divide by 2**n) rather than shifting bits
        mantissa = mantissa << width
        for i in range(degree):
            mantissa = mantissa >> widtht
            torch.frac(mantissa, out=frac)
            torch.trunc(mantissa, out=mantissa)
            torch.clamp(frac << widtht, cycle_neg + 1, cycle_pos - 1, out=frac)
            torch.add(frac >> widtht, mantissa_new >> widtht, out=mantissa_new)

        mantissa_new = mantissa_new << delta

        if temporal in ["i", "input"]:
            input_new = torch.ldexp(mantissa_new, exponent).type(dtype)
            weight_new = weight
        elif temporal in ["w", "weight"]:
            input_new = input
            weight_new = torch.ldexp(mantissa_new, exponent).type(dtype)

        output = torch.matmul(input_new, weight_new.t())

        if bias is not None:
            output += bias.unsqueeze(0).expand_as(output)
        return output
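The frexp/ldexp pair used above splits a float into mantissa and exponent and reassembles it; the mantissa manipulation in between is what quantizes. A round-trip sketch:

import torch

x = torch.tensor([0.15625, -3.75])
mantissa, exponent = torch.frexp(x)     # x == mantissa * 2**exponent, |mantissa| in [0.5, 1)
print(mantissa, exponent)               # tensor([ 0.6250, -0.9375]), exponents [-2, 2]
print(torch.ldexp(mantissa, exponent))  # reconstructs x exactly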
Example #4
def extract_feat(Data, keys, output_dir, args=None, num_attr=256):
    num = len(Data)
    name = 'attributes'
    if os.path.exists(os.path.join(output_dir, name + '.h5')):
        print('The file ' + name + '.h5 already exists in ' + output_dir)
        name = name + '_' + args.option
    if args.hard_label:
        name = name + '_hard_label'

    with h5py.File(os.path.join(output_dir, name + '.h5'), 'w') as file_attr:

        for i in range(0, num):
            # get the inputs
            inputs = Data[i, :]
            inputs = Variable(inputs.cuda().float())

            # forward
            output = test_net('test', inputs)
            output = torch.nn.Sigmoid()(output)

            if args.hard_label:
                output = torch.trunc(2 * output.data)

            d_set_attr = file_attr.create_dataset(keys[i], (num_attr, ),
                                                  dtype="float")
            d_set_attr[...] = output.data.cpu().float().numpy()

            if i % 500 == 0:
                print('processing %d/%d (%.2f%% done)' %
                      (i, num, i * 100.0 / num))

    print('Finished extracting features for ' + output_dir)
Example #5
    def _get_epoch_indices(self):
        g = torch.Generator()
        g.manual_seed(self.epoch)

        # apply curriculum on repeat factors
        phase = self.epoch / (self.max_epochs - 1)
        alpha = self.curriculum_func(phase)
        rep_factors = (1 - alpha) + alpha * self.repeat_factors.clone()

        # stochastic rounding on repeat factors so that repeat factors slightly
        # differ for every epoch.
        rep_int_part = torch.trunc(rep_factors)
        rep_frac_part = rep_factors - rep_int_part
        rands = torch.rand(len(rep_frac_part), generator=g)
        stochastic_rep_factors = rep_int_part + (rands < rep_frac_part).float()

        # Construct a list of indices in which we repeat images as specified
        img_indices = []
        for dataset_index, rep_factor in enumerate(stochastic_rep_factors):
            img_indices.extend([dataset_index] * int(rep_factor.item()))

        # image index list for this epoch, reflecting the repetition
        rand_indices = torch.randperm(len(img_indices),
                                      generator=g).tolist()[:len(self.dataset)]
        indices = np.asarray(img_indices)[rand_indices].tolist()

        if self.rank == 0:
            log_str = 'Epoch: {}/{}, '.format(self.epoch + 1, self.max_epochs)
            log_str += 'total copied indices: {}, net indices: {}, '.format(
                len(img_indices), len(set(indices)))
            log_str += 'len(dataset): {}'.format(len(self.dataset))
            print(log_str)
        return indices
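The stochastic-rounding idiom above (trunc plus a Bernoulli draw on the fractional part) keeps the expected repeat count equal to the real-valued factor. In isolation:

import torch

r = torch.tensor([1.3, 2.7, 0.5])
int_part = torch.trunc(r)
frac_part = r - int_part
# round up with probability equal to the fractional part
rounded = int_part + (torch.rand_like(r) < frac_part).float()
print(rounded)          # e.g. tensor([1., 3., 1.]); E[rounded] == r elementwise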
Example #6
    def __init__(self, repeat_thresh, shuffle=True, seed=None):
        """
        Args:
            dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
            repeat_thresh (float): frequency threshold below which data is repeated.
            shuffle (bool): whether to shuffle the indices or not
            seed (int): the initial seed of the shuffle. Must be the same
                across all workers. If None, will use a random seed shared
                among workers (require synchronization among all workers).
        """
        self._shuffle = shuffle
        if seed is None:
            seed = shared_random_seed()
        self._seed = int(seed)

        #self._rank = comm.get_rank()
        #self._world_size = comm.get_world_size()

        # Get fractional repeat factors and split into whole number (_int_part)
        # and fractional (_frac_part) parts.
        #/scratch_net/knurrhahn/knurrhahn/shijain/ade_deeplab/DeepLabV3Plus-Pytorch
        with open(
                '/scratch_net/knurrhahn/knurrhahn/shijain/ade_deeplab/DeepLabV3Plus-Pytorch/repeatfactors.pkl',
                "rb") as f:
            rep_factors = pickle.load(f)
        rep_factors = torch.tensor(rep_factors, dtype=torch.float32)
        #rep_factors = self._get_repeat_factors(repeat_thresh)
        self._int_part = torch.trunc(rep_factors)
        self._frac_part = rep_factors - self._int_part
Example #7
    def __init__(self, dataset, repeat_thresh, shuffle=True, seed=None):
        """
        Args:
            dataset (Dataset): dataset used for sampling.
            repeat_thresh (float): frequency threshold below which data is repeated.
            shuffle (bool): whether to shuffle the indices or not.
            seed (int): the initial seed of the shuffle. Must be the same
                across all workers. If None, will use a random seed shared
                among workers (require synchronization among all workers).
        """
        self._shuffle = shuffle
        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)

        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

        dataset_dicts = []
        if hasattr(dataset, "datasets"):
            for d in dataset.datasets:
                dataset_dicts += d.dataset_dicts
        else:
            dataset_dicts = dataset.dataset_dicts

        # Get fractional repeat factors and split into whole number (_int_part)
        # and fractional (_frac_part) parts.
        rep_factors = self._get_repeat_factors(dataset_dicts, repeat_thresh)
        self._int_part = torch.trunc(rep_factors)
        self._frac_part = rep_factors - self._int_part
Example #8
def trunc(input_):
    """Wrapper of `torch.trunc`.

    Parameters
    ----------
    input_ : DTensor
        Input dense tensor.
    """
    return torch.trunc(input_._data)
Example #9
def test_appro():
    print("Enter test_appro")
    a = torch.tensor([-3.5, -3.1415, -3., 0.0, 3., 3.1415, 3.5])
    print("orig:", a)
    print("floor: ", torch.floor(a))
    print("ceil: ", torch.ceil(a))
    print("trunc: ", torch.trunc(a))
    print("frac: ", torch.frac(a))
    print("round: ", torch.round(a))
    print("Exit test_appro")
Example #10
    def __init__(self, repeat_factors, *, shuffle=True, seed=None):
        self._shuffle = shuffle
        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)

        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

        self._int_part = torch.trunc(repeat_factors)
        self._frac_part = repeat_factors - self._int_part
Example #11
    def _f02rosenberg(self, f0_values, t1_ratio=0.4, t2_ratio=0.16):
        rad = f0_values / self.sampling_rate  # normalized frequency (0~2pi -> 0~1)
        rad_cum = torch.cumsum(rad, 1)  # rad
        rad_cum = rad_cum - torch.trunc(rad_cum)  # rad within (0, 1)
        rosenberg = torch.zeros_like(rad_cum)
        ind1 = rad_cum < t1_ratio
        ind2 = (rad_cum >= t1_ratio) * (rad_cum < t1_ratio + t2_ratio)
        rosenberg[ind1] = 1.0 - torch.cos(rad_cum[ind1] / t1_ratio * np.pi)
        rosenberg[ind2] = torch.cos(
            (rad_cum[ind2] - t1_ratio) / t2_ratio * np.pi / 2)
        return rosenberg
Example #12
def trunc(x):
    if callable(x):
        y = copy.copy(x)  # shallow copy

        def compute(*args, **kwargs):
            return torch.trunc(x(*args, **kwargs))

        y.compute = compute
        return y
    else:
        return torch.trunc(x)
Example #13
    def __init__(self,
                 dataset,
                 config,
                 num_replicas=None,
                 rank=None,
                 shuffle=True):
        """
        Args:
            dataset: COCODataset.
            config:
                REPEAT_THRESHOLD (float): frequency used for control imgs per epoch
                MAX_REPEAT_TIMES (float) : max repeat times for single epoch
                MIN_REPEAT_TIMES (float) : min repeat times for single epoch
                POW (float): 0.5 for the LVIS paper's sqrt, 1.0 for linear
            shuffle (bool): whether to shuffle the indices or not
        """
        self.shuffle = shuffle
        self.config = config
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError(
                    "Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError(
                    "Requires distributed package to be available")
            rank = dist.get_rank()
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(
            math.ceil(len(dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas

        # Get per-image annotations list
        coco_json = dataset.coco
        img_bboxes = {}
        ids = dataset.ids  # or use dataset_dicts.id_to_img_map and get its value
        annotations = coco_json.anns
        for item_ in annotations:
            item = annotations[item_]
            img_bboxes.setdefault(item['image_id'], []).append(item)
        dataset_dict_img = []
        for img_id in ids:
            dataset_dict_img.append({"annotations": img_bboxes[img_id]})

        # Get fractional repeat factors and split into whole number (_int_part)
        # and fractional (_frac_part) parts.
        rep_factors = self._get_repeat_factors(dataset_dict_img)
        self._int_part = torch.trunc(rep_factors)
        self._frac_part = rep_factors - self._int_part
Example #14
def phase_correlation(img, ref, upsample_factor=1):
    """
    An adaptation of skimage.registration.phase_cross_correlation for PyTorch, enabling GPU support.
    Perform phase correlation to find relative translational shift.

    :param img: Tensor. In shape [y, x, 2], where the last dimension holds real and imaginary parts.
    :param ref: Tensor. In shape [y, x, 2], where the last dimension holds real and imaginary parts.
    :param upsample_factor: Int. Images will be registered to within `1 / upsample_factor` of a pixel.
    :return: Shift as [dy, dx]. It is the relative shift of img with regards to ref. In other words, you can shift
             img by -shifts to get ref.
    """
    img_shape = img.shape[:2]
    size = img_shape[0] * img_shape[1]
    f_img_real, f_img_imag = fft2(img[:, :, 0], img[:, :, 1])
    f_ref_real, f_ref_imag = fft2(ref[:, :, 0], ref[:, :, 1])
    prod_real, prod_imag = complex_mul(f_img_real, f_img_imag, f_ref_real,
                                       -f_ref_imag)
    cc_real, cc_imag = ifft2(prod_real, prod_imag)
    cc = cc_real**2 + cc_imag**2
    shifts = tc.argmax(cc)
    shifts = tc.tensor([shifts // img_shape[1], shifts % img_shape[1]],
                       device=img.device).float()

    if upsample_factor > 1:
        # Initial shift estimate in upsampled grid
        shifts = tc.round(shifts * upsample_factor) / upsample_factor
        upsampled_region_size = np.ceil(upsample_factor * 1.5)
        upsampled_region_size = tc.tensor(
            [upsampled_region_size, upsampled_region_size], device=img.device)
        # Center of output array at dftshift + 1
        dftshift = tc.trunc(upsampled_region_size / 2.0)
        normalization = (size * upsample_factor**2)
        # Matrix multiply DFT around the current shift estimate
        sample_region_offset = dftshift - shifts * upsample_factor
        cc_real, cc_imag = _upsampled_dft(prod_real, -prod_imag,
                                          upsampled_region_size,
                                          upsample_factor,
                                          sample_region_offset)
        cc_imag = -cc_imag
        cc_real /= normalization
        cc_imag /= normalization
        # Locate maximum and map back to original pixel grid
        maxima = tc.argmax(cc_real**2 + cc_imag**2)
        maxima = [maxima // cc_real.shape[1], maxima % cc_real.shape[1]]

        maxima = tc.tensor(maxima, device=shifts.device) - dftshift

        shifts = shifts + maxima / upsample_factor
    return shifts
Example #15
def build_masks(imgs, boxes, obj_to_img):
    H, W = imgs.shape[2], imgs.shape[3]
    masks = torch.zeros((imgs.shape[0], 1, H, W))

    # create masks
    # print("MASK STATS")
    # print("BOXES: " + str(boxes.shape))
    # print("OBJ TO IMG: " + str(obj_to_img.shape))
    for i in range(boxes.shape[0]):
        img_ind = obj_to_img[i]

        # boxes are assumed normalized to [0, 1]; H scales both axes,
        # so square inputs (H == W) are implicitly assumed
        box = torch.trunc(boxes[i] * H).int()
        masks[img_ind, :, box[1]:box[3] + 1, box[0]:box[2] + 1] = 1

    # print("MASKS: " + str(masks.shape))
    return masks
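A minimal invocation sketch, assuming boxes hold normalized [x0, y0, x1, y1] coordinates and square images:

import torch

imgs = torch.zeros(2, 3, 64, 64)
boxes = torch.tensor([[0.10, 0.10, 0.50, 0.50],
                      [0.25, 0.25, 0.75, 0.75]])
obj_to_img = torch.tensor([0, 1])        # box i belongs to image obj_to_img[i]
masks = build_masks(imgs, boxes, obj_to_img)
print(masks.shape)                       # torch.Size([2, 1, 64, 64])
print(masks.sum(dim=(1, 2, 3)))          # per-image mask areas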
Example #16
    def img_tranform_3(self, img):

        # window width and level of lung CT
        center = -500
        width = 1500

        min_val = (2 * center - width) / 2.0 + 0.5
        max_val = (2 * center + width) / 2.0 + 0.5
        dFactor = 255.0 / (max_val - min_val)
        img = img - min_val
        img = torch.trunc(img * dFactor)
        img[img < 0.0] = 0
        img[img > 255.0] = 255

        img /= 255.
        return img
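With center = -500 and width = 1500, the window runs from -1249.5 to 250.5 HU; values are linearly mapped into [0, 255], truncated, clamped, then rescaled to [0, 1]. A quick numeric check of the same mapping:

import torch

center, width = -500, 1500                    # lung window
lo = (2 * center - width) / 2.0 + 0.5         # -1249.5
hi = (2 * center + width) / 2.0 + 0.5         #  250.5
hu = torch.tensor([-2000.0, -500.0, 0.0, 500.0])
out = torch.trunc((hu - lo) * 255.0 / (hi - lo)).clamp(0, 255) / 255.0
print(out)                                    # tensor([0.0000, 0.4980, 0.8314, 1.0000])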
Example #17
    def forward(self, x):
        # The input x is a series of random numbers of size k x 2
        # You should use these random numbers to compute and return pi using pytorch

        #        print('original vector:')
        #        print(x)
        x = x[:, 0]**2 + x[:, 1]**2
        #        print('new vector:')
        #        print(x)
        x = torch.trunc(x)
        #x_thresholded = torch.where(x < 1.0, torch.Tensor([1.0]), torch.Tensor([0.0]))
        #        print('threshold vector')
        #        print(x)
        x = (1 - torch.mean(x)) * 4
        #        print('estimate of pi')
        #        print(x)
        return x
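The forward pass is a Monte Carlo estimate of pi: trunc(x^2 + y^2) is 0 for samples inside the unit quarter-circle and 1 outside, so 4 * (1 - mean) approximates pi. A standalone sketch:

import torch

torch.manual_seed(0)
x = torch.rand(100000, 2)                         # uniform points in the unit square
outside = torch.trunc(x[:, 0]**2 + x[:, 1]**2)    # 1 outside the quarter circle, else 0
print((1 - outside.mean()) * 4)                   # approximately tensor(3.14...)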
Example #18
    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        '''
        ['CLASSES', '__add__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', 
        '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__len__', '__lt__', '__module__', 
        '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', 
        '_filter_imgs', '_parse_ann_info', '_rand_another', '_set_group_flag', 'ann_file', 'cat2label', 'cat_ids', 'coco', 'data_root', 'filter_empty_gt', 
        'flag', 'get_ann_info', 'img_ids', 'img_infos', 'img_prefix', 'load_annotations', 'load_proposals', 'pipeline', 'pre_pipeline', 'prepare_test_img', 
        'prepare_train_img', 'proposal_file', 'proposals', 'seg_prefix', 'test_mode']
        '''
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas  # gpu num
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        # self.group_sizes = np.bincount(self.flag)

        rep_factors = self._get_repeat_factors()
        self._int_part = torch.trunc(rep_factors)
        self._frac_part = rep_factors - self._int_part
        g2 = torch.Generator()
        g2.manual_seed(2)
        self.resampled_indices = np.array(self._get_epoch_indices(g2))
        self.flag = self.flag[self.resampled_indices]
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0  # samples on one gpu
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas
Example #19
    def __init__(self, repeat_factors, *, shuffle=True, seed=None):
        """
        Args:
            repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's
                full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
            shuffle (bool): whether to shuffle the indices or not
            seed (int): the initial seed of the shuffle. Must be the same
                across all workers. If None, will use a random seed shared
                among workers (require synchronization among all workers).
        """
        self._shuffle = shuffle
        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)

        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

        # Split into whole number (_int_part) and fractional (_frac_part) parts.
        self._int_part = torch.trunc(repeat_factors)
        self._frac_part = repeat_factors - self._int_part
Example #20
    def __init__(self,
                 dataset,
                 repeat_factors,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        """
        Args:
            repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's
                full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
        """

        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

        self._world_size = comm.get_world_size()

        # Split into whole number (_int_part) and fractional (_frac_part) parts.
        self._int_part = torch.trunc(repeat_factors)
        self._frac_part = repeat_factors - self._int_part
Example #21
def qualities_to_scale_factors(qualities: Tensor) -> Tensor:
    r"""
    Converts a batch of qualities in [0, 100] to a batch of scale factors suitable for scaling one of the IJG reference quantization matrices.

    Args:
        qualities (Tensor): A single dimensional batch of qualities.

    Returns:
        Tensor: A single dimensional batch of scale factors.
    """
    qualities = qualities.clone()
    qualities[qualities <= 0] = 1
    qualities[qualities > 100] = 100

    indices_0_50 = qualities < 50
    indices_50_100 = qualities >= 50

    qualities[indices_0_50] = 5000 // qualities[indices_0_50]
    qualities[indices_50_100] = torch.trunc(200 -
                                            qualities[indices_50_100] * 2)

    return qualities
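A quick check against the IJG formula (q < 50 maps to 5000/q, q >= 50 to 200 - 2q):

import torch

q = torch.tensor([10.0, 50.0, 90.0])
print(qualities_to_scale_factors(q))    # tensor([500., 100.,  20.])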
Example #22
    def __init__(self, dataset_dicts, repeat_thresh, shuffle=True, seed=None):
        """
        Args:
            dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
            repeat_thresh (float): frequency threshold below which data is repeated.
            shuffle (bool): whether to shuffle the indices or not
            seed (int): the initial seed of the shuffle. Must be the same
                across all workers. If None, will use a random seed shared
                among workers (require synchronization among all workers).
        """
        self._shuffle = shuffle
        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)

        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()

        # Get fractional repeat factors and split into whole number (_int_part)
        # and fractional (_frac_part) parts.
        rep_factors = self._get_repeat_factors(dataset_dicts, repeat_thresh)
        self._int_part = torch.trunc(rep_factors)
        self._frac_part = rep_factors - self._int_part
Example #23
    def align_angle_c4(angle_map, return_tensor=False):
        """
        [-180, -90) -> 0
        [-90, 0) -> 1
        [0, 90) -> 2
        [90, 180) -> 3
        """

        if return_tensor:
            assert isinstance(angle_map, torch.Tensor)
        else:
            angle_map = torch.from_numpy(angle_map)

        angle_index_map = torch.trunc((angle_map + 180) / 90).long()
        angle_index_map = torch.clamp(angle_index_map, min=0, max=3)

        new_angle_map = (angle_index_map * 90 - 135).float()

        if not return_tensor:
            new_angle_map = new_angle_map.numpy()
            angle_index_map = angle_index_map.numpy()

        return new_angle_map, angle_index_map
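A small check of the binning, assuming align_angle_c4 is reachable as a plain function (it takes and returns NumPy arrays by default):

import numpy as np

angles = np.array([-180.0, -90.0, -1.0, 0.0, 89.0, 90.0, 179.0])
new_angle, index = align_angle_c4(angles)
print(index)       # [0 1 1 2 2 3 3]
print(new_angle)   # [-135.  -45.  -45.   45.   45.  135.  135.]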
Example #24
            # wrap them in Variable
            inputs, labels = Variable(inputs.cuda().float()), Variable(
                labels.cuda().float())
            domain_inputs, domain_labels = Variable(
                domain_inputs.cuda().float()), Variable(
                    domain_labels.cuda().float())

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward
            class_outputs, domain_outputs = net('train', inputs, domain_inputs,
                                                l, domain_labels)

            domain_labels = domain_labels.squeeze()
            domain_preds = torch.trunc(
                2 * F.sigmoid(domain_outputs).data)  # not used for loss
            correct_domain = domain_labels.data
            domain_counts += len(domain_preds)
            domain_epoch_corrects += torch.sum(
                domain_preds == correct_domain).item()

            class_loss = class_criterion(class_outputs, labels)
            domain_loss = domain_criterion(domain_outputs, domain_labels)
            loss = class_loss + loss_weight * domain_loss
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.data[0]

        # ---------------------------------------
Example #25
    def compute(*args, **kwargs):
        return torch.trunc(x(*args, **kwargs))
Example #26
def training_loop(dataloader_X, dataloader_Y, test_dataloader_X,
                  test_dataloader_Y, device, opts):
    """Runs the training loop.
        * Saves checkpoint every opts.checkpoint_every iterations
        * Saves generated samples every opts.sample_every iterations
    """

    # Create generators and discriminators
    G_XtoY, G_YtoX, D_X, D_Y = create_model(opts)
    G_XtoY.to(device)
    G_YtoX.to(device)
    D_X.to(device)
    D_Y.to(device)

    # num_workers is assumed to be defined at module scope (used here as a GPU count)
    if device.type == 'cuda' and num_workers > 0:
        G_XtoY = nn.DataParallel(G_XtoY, list(range(num_workers)))
        G_YtoX = nn.DataParallel(G_YtoX, list(range(num_workers)))
        D_X = nn.DataParallel(D_X, list(range(num_workers)))
        D_Y = nn.DataParallel(D_Y, list(range(num_workers)))

    g_params = list(G_XtoY.parameters()) + list(
        G_YtoX.parameters())  # Get generator parameters
    d_params = list(D_X.parameters()) + list(
        D_Y.parameters())  # Get discriminator parameters

    # Create optimizers for the generators and discriminators
    g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2])
    d_optimizer = optim.Adam(d_params, opts.lr, [opts.beta1, opts.beta2])

    iter_X = iter(dataloader_X)
    iter_Y = iter(dataloader_Y)

    test_iter_X = iter(test_dataloader_X)
    test_iter_Y = iter(test_dataloader_Y)

    # Get some fixed data from domains X and Y for sampling. These images are held
    # constant throughout training and let us inspect the model's performance.
    fixed_X = next(test_iter_X)[0].to(device)
    fixed_Y = next(test_iter_Y)[0].to(device)

    iter_per_epoch = min(len(iter_X), len(iter_Y))

    mse_loss = torch.nn.MSELoss()

    d_real_losses = []
    D_Y_losses = []
    d_fake_losses = []
    D_X_losses = []
    g_losses = []
    for iteration in range(1, opts.train_iters + 1):

        # Reset data_iter for each epoch
        if iteration % iter_per_epoch == 0:
            iter_X = iter(dataloader_X)
            iter_Y = iter(dataloader_Y)

        images_X = next(iter_X)[0].to(device)
        images_Y = next(iter_Y)[0].to(device)

        # ============================================
        #            TRAIN THE DISCRIMINATORS
        # ============================================

        #########################################
        ##             FILL THIS IN            ##
        #########################################

        # Train with real images
        d_optimizer.zero_grad()

        # 1. Compute the discriminator losses on real images
        # D_X_loss = ...
        # D_Y_loss = ...
        real_output_X = D_X(images_X).reshape((-1, 1))
        real_output_Y = D_Y(images_Y).reshape((-1, 1))
        # var_label is assumed to be defined at module scope (label-noise std)
        var_Value = var_label * torch.ones(opts.batch_size, 1).to(device)
        # trunc(1.1 * U[0,1)) is 1 with probability ~1/11, zeroing that label
        refSeq = 1 - torch.trunc(
            1.1 * torch.rand(opts.batch_size, 1)).to(device)
        real_labels = torch.normal(mean=1, std=var_Value).mul(refSeq)

        D_X_loss = mse_loss(real_output_X, real_labels)
        D_Y_loss = mse_loss(real_output_Y, real_labels)

        d_real_loss = D_X_loss + D_Y_loss
        d_real_loss.backward()
        d_optimizer.step()

        # Train with fake images
        d_optimizer.zero_grad()

        # 2. Generate fake images that look like domain X based on real images in domain Y
        # fake_X = ...
        fake_X = G_YtoX(images_Y)

        # 3. Compute the loss for D_X
        # D_X_loss = ...
        fake_labels = torch.zeros(opts.batch_size, 1).to(device)
        fake_output_X = D_X(fake_X).reshape((-1, 1))
        D_X_loss = mse_loss(fake_output_X, fake_labels)

        # 4. Generate fake images that look like domain Y based on real images in domain X
        # fake_Y = ...
        fake_Y = G_XtoY(images_X)

        # 5. Compute the loss for D_Y
        # D_Y_loss = ...
        fake_output_Y = D_Y(fake_Y).reshape((-1, 1))
        D_Y_loss = mse_loss(fake_output_Y, fake_labels)

        d_fake_loss = D_X_loss + D_Y_loss
        d_fake_loss.backward()
        d_optimizer.step()

        # =========================================
        #            TRAIN THE GENERATORS
        # =========================================

        #########################################
        ##    FILL THIS IN: Y--X-->Y CYCLE     ##
        #########################################
        g_optimizer.zero_grad()

        # 1. Generate fake images that look like domain X based on real images in domain Y
        # fake_X = ...
        fake_X = G_YtoX(images_Y)

        # 2. Compute the generator loss based on domain X
        # g_loss = ...
        fake_output_X = D_X(fake_X).reshape((-1, 1))
        g_loss = mse_loss(fake_output_X, real_labels)

        # 3. Compute the cycle consistency loss (the reconstruction loss)
        # cycle_consistency_loss = ...
        re_Y = G_XtoY(fake_X)
        cycle_consistency_loss = mse_loss(re_Y, images_Y)

        g_loss += cycle_consistency_loss

        g_loss.backward()
        g_optimizer.step()

        #########################################
        ##    FILL THIS IN: X--Y-->X CYCLE     ##
        #########################################

        g_optimizer.zero_grad()

        # 1. Generate fake images that look like domain Y based on real images in domain X
        # fake_Y = ...
        fake_Y = G_XtoY(images_X)

        # 2. Compute the generator loss based on domain Y
        # g_loss = ...
        fake_output_Y = D_Y(fake_Y).reshape((-1, 1))
        g_loss = mse_loss(fake_output_Y, real_labels)

        # 3. Compute the cycle consistency loss (the reconstruction loss)
        # cycle_consistency_loss = ...
        re_X = G_YtoX(fake_Y)
        cycle_consistency_loss = mse_loss(re_X, images_X)

        g_loss += cycle_consistency_loss

        g_loss.backward()
        g_optimizer.step()

        # Print the log info
        if iteration % opts.log_step == 0:
            print(
                'Iteration [{:5d}/{:5d}] | d_real_loss: {:6.4f} | d_Y_loss: {:6.4f} | d_X_loss: {:6.4f} | '
                'd_fake_loss: {:6.4f} | g_loss: {:6.4f}'.format(
                    iteration, opts.train_iters, d_real_loss.item(),
                    D_Y_loss.item(), D_X_loss.item(), d_fake_loss.item(),
                    g_loss.item()))
            d_real_losses.append(d_real_loss.item())
            D_Y_losses.append(D_Y_loss.item())
            d_fake_losses.append(d_fake_loss.item())
            D_X_losses.append(D_X_loss.item())
            g_losses.append(g_loss.item())

        # Save the generated samples
        if iteration % opts.sample_every == 0:
            save_samples(iteration, fixed_Y, fixed_X, G_YtoX, G_XtoY, opts)

        # Save the model parameters
        if iteration % opts.checkpoint_every == 0:
            checkpoint(iteration, G_XtoY, G_YtoX, D_X, D_Y, opts)

    epochs = list(range(0, len(d_real_losses)))
    plt.figure()
    plt.plot(epochs, d_real_losses, color='cyan', label='d_real_loss')
    plt.plot(epochs, D_Y_losses, color='purple', label='D_Y_loss')
    plt.plot(epochs, d_fake_losses, color='pink', label='d_fake_loss')
    plt.plot(epochs, D_X_losses, color='yellow', label='D_X_loss')
    plt.plot(epochs, g_losses, color='magenta', label='g_loss')
    plt.legend(loc='upper left')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.grid()
    plt.savefig('cycle-plot')
    plt.show()
Example #27
def test_trunc(x, y):
    c = torch.trunc(torch.add(x, y))
    return c
Example #28
 def pointwise_ops(self):
     a = torch.randn(4)
     b = torch.randn(4)
     t = torch.tensor([-1, -2, 3], dtype=torch.int8)
     r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
     s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
     f = torch.zeros(3)
     g = torch.tensor([-1, 0, 1])
     w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
     return (
         torch.abs(torch.tensor([-1, -2, 3])),
         torch.absolute(torch.tensor([-1, -2, 3])),
         torch.acos(a),
         torch.arccos(a),
         torch.acosh(a.uniform_(1.0, 2.0)),
         torch.add(a, 20),
         torch.add(a, torch.randn(4, 1), alpha=10),
         torch.addcdiv(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.addcmul(torch.randn(1, 3),
                       torch.randn(3, 1),
                       torch.randn(1, 3),
                       value=0.1),
         torch.angle(a),
         torch.asin(a),
         torch.arcsin(a),
         torch.asinh(a),
         torch.arcsinh(a),
         torch.atan(a),
         torch.arctan(a),
         torch.atanh(a.uniform_(-1.0, 1.0)),
         torch.arctanh(a.uniform_(-1.0, 1.0)),
         torch.atan2(a, a),
         torch.bitwise_not(t),
         torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
         torch.ceil(a),
         torch.clamp(a, min=-0.5, max=0.5),
         torch.clamp(a, min=0.5),
         torch.clamp(a, max=0.5),
         torch.clip(a, min=-0.5, max=0.5),
         torch.conj(a),
         torch.copysign(a, 1),
         torch.copysign(a, b),
         torch.cos(a),
         torch.cosh(a),
         torch.deg2rad(
             torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0,
                                                              -90.0]])),
         torch.div(a, b),
         torch.divide(a, b, rounding_mode="trunc"),
         torch.divide(a, b, rounding_mode="floor"),
         torch.digamma(torch.tensor([1.0, 0.5])),
         torch.erf(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
         torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
         torch.exp(torch.tensor([0.0, math.log(2.0)])),
         torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
         torch.expm1(torch.tensor([0.0, math.log(2.0)])),
         torch.fake_quantize_per_channel_affine(
             torch.randn(2, 2, 2),
             (torch.randn(2) + 1) * 0.05,
             torch.zeros(2),
             1,
             0,
             255,
         ),
         torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
         torch.float_power(torch.randint(10, (4, )), 2),
         torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4,
                                                             -5])),
         torch.floor(a),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
         # torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
         torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
         torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.frac(torch.tensor([1.0, 2.5, -3.2])),
         torch.randn(4, dtype=torch.cfloat).imag,
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
         torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
         torch.lerp(torch.arange(1.0, 5.0),
                    torch.empty(4).fill_(10), 0.5),
         torch.lerp(
             torch.arange(1.0, 5.0),
             torch.empty(4).fill_(10),
             torch.full_like(torch.arange(1.0, 5.0), 0.5),
         ),
         torch.lgamma(torch.arange(0.5, 2, 0.5)),
         torch.log(torch.arange(5) + 10),
         torch.log10(torch.rand(5)),
         torch.log1p(torch.randn(5)),
         torch.log2(torch.rand(5)),
         torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([-100.0, -200.0, -300.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp(torch.tensor([1.0, 2000.0, 30000.0]),
                         torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([-100.0, -200.0, -300.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logaddexp2(torch.tensor([1.0, 2000.0, 30000.0]),
                          torch.tensor([-1, -2, -3])),
         torch.logical_and(r, s),
         torch.logical_and(r.double(), s.double()),
         torch.logical_and(r.double(), s),
         torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
         torch.logical_not(
             torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
         torch.logical_not(
             torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
             out=torch.empty(3, dtype=torch.int16),
         ),
         torch.logical_or(r, s),
         torch.logical_or(r.double(), s.double()),
         torch.logical_or(r.double(), s),
         torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logical_xor(r, s),
         torch.logical_xor(r.double(), s.double()),
         torch.logical_xor(r.double(), s),
         torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
         torch.logit(torch.rand(5), eps=1e-6),
         torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
         torch.i0(torch.arange(5, dtype=torch.float32)),
         torch.igamma(a, b),
         torch.igammac(a, b),
         torch.mul(torch.randn(3), 100),
         torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
         torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
         torch.tensor([float("nan"),
                       float("inf"), -float("inf"), 3.14]),
         torch.nan_to_num(w),
         torch.nan_to_num(w, nan=2.0),
         torch.nan_to_num(w, nan=2.0, posinf=1.0),
         torch.neg(torch.randn(5)),
         # torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
         torch.polygamma(1, torch.tensor([1.0, 0.5])),
         torch.polygamma(2, torch.tensor([1.0, 0.5])),
         torch.polygamma(3, torch.tensor([1.0, 0.5])),
         torch.polygamma(4, torch.tensor([1.0, 0.5])),
         torch.pow(a, 2),
         torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
         torch.rad2deg(
             torch.tensor([[3.142, -3.142], [6.283, -6.283],
                           [1.570, -1.570]])),
         torch.randn(4, dtype=torch.cfloat).real,
         torch.reciprocal(a),
         torch.remainder(torch.tensor([-3.0, -2.0]), 2),
         torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
         torch.round(a),
         torch.rsqrt(a),
         torch.sigmoid(a),
         torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sgn(a),
         torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
         torch.sin(a),
         torch.sinc(a),
         torch.sinh(a),
         torch.sqrt(a),
         torch.square(a),
         torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
         torch.tan(a),
         torch.tanh(a),
         torch.trunc(a),
         torch.xlogy(f, g),
         torch.xlogy(f, g),
         torch.xlogy(f, 4),
         torch.xlogy(2, g),
     )
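Among the ops listed, note that divide with rounding_mode="trunc" is exactly trunc composed with true division:

import torch

a = torch.tensor([7.0, -7.0])
b = torch.tensor([2.0, 2.0])
print(torch.divide(a, b, rounding_mode="trunc"))   # tensor([ 3., -3.])
print(torch.trunc(a / b))                          # tensor([ 3., -3.])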
Example #29
def test_tlutconv2d():
    plot_en = False

    hwcfg = {
        "temporal": "w",
        "widtht": 4,
        "formati": "bfloat16",
        "widthi": 12,
        "quantilei": 1,
        "formatw": "bfloat16",
        "widthw": 12,
        "quantilew": 1,
        "cycle": None,
        "rounding": "round",
        "signmag": True
    }

    if hwcfg["formati"] == "bfloat16":
        dtype = torch.bfloat16
    elif hwcfg["formati"] == "float16":
        dtype = torch.float16
    elif hwcfg["formati"] == "float32":
        dtype = torch.float32
    else:
        if hwcfg["formatw"] == "bfloat16":
            dtype = torch.bfloat16
        elif hwcfg["formatw"] == "float16":
            dtype = torch.float16
        else:
            dtype = torch.float32

    in_channels = 32
    out_channels = 16
    kernel_size = (3, 3)
    stride = 2
    padding = 0
    dilation = 1
    groups = 1
    bias = True
    padding_mode = 'zeros'

    total_bit = 8
    input_int_bit = 0
    input_fra_bit = total_bit - input_int_bit

    batch = 32
    input_size = (128, 32)
    input = (
        (torch.rand(batch, in_channels, input_size[0], input_size[1]) - 0.5) *
        2).to(device).type(dtype)
    if hwcfg["formati"] == "fxp":
        input = torch.trunc(
            input << hwcfg["widthi"]).round() >> hwcfg["widthi"]
    input = input << input_int_bit

    conv2d = torch.nn.Conv2d(in_channels,
                             out_channels,
                             kernel_size,
                             stride,
                             padding,
                             dilation,
                             groups,
                             bias,
                             padding_mode,
                             dtype=dtype).to(device)
    if hwcfg["formatw"] == "fxp":
        conv2d.weight.data = torch.trunc(
            conv2d.weight << hwcfg["widthw"]).round() >> hwcfg["widthw"]
        if bias:
            conv2d.bias.data = torch.trunc(
                conv2d.bias << hwcfg["widthw"]).round() >> hwcfg["widthw"]

    conv2d_o = conv2d(input)

    uconv2d = TLUTConv2d(in_channels,
                         out_channels,
                         kernel_size,
                         stride,
                         padding,
                         dilation,
                         groups,
                         bias,
                         padding_mode,
                         weight_ext=conv2d.weight.data,
                         bias_ext=conv2d.bias,
                         hwcfg=hwcfg).to(device)
    uconv2d_o = uconv2d(input)
    print(uconv2d.hwcfg)

    conv2d_o.abs().mean().backward()
    uconv2d_o.abs().mean().backward()

    diff = (uconv2d_o - conv2d_o)
    print()
    print("diff max:", diff.max())
    print("diff min:", diff.min())
    print("diff mean:", diff.mean())
    print("diff rmse:", torch.sqrt(torch.mean(torch.square(diff))))

    diff_grad = (uconv2d.weight.grad - conv2d.weight.grad)
    print()
    print("diff grad max:", diff_grad.max())
    print("diff grad min:", diff_grad.min())
    print("diff grad mean:", diff_grad.mean())
    print("diff grad rmse:", torch.sqrt(torch.mean(torch.square(diff_grad))))

    if plot_en:
        fig = plt.hist(diff.cpu().detach().numpy().flatten(),
                       bins='auto')  # arguments are passed to np.histogram
        plt.title("Histogram for output error")
        plt.show()

        fig = plt.hist(diff_grad.cpu().detach().numpy().flatten(),
                       bins='auto')  # arguments are passed to np.histogram
        plt.title("Histogram for grad error")
        plt.show()
Example #30
    parser.add_argument('--out', required=True)
    parser.add_argument('--root_dir', required=True)
    parser.add_argument('--ext', default='jpg')
    parser.add_argument('--quant_level', type=int, default=2)
    parser.add_argument('--resize', type=int, default=220)

    args = parser.parse_args()

    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((args.resize, args.resize)),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])

    if not os.path.exists(args.out):
        os.makedirs(args.out)

    files = [f for f in os.listdir(args.root_dir) if f.endswith(args.ext)]

    for f in files:
        img_name = os.path.join(args.root_dir, f)
        image = Image.open(img_name).convert('L')
        image = ImageOps.invert(image)
        sample = transform(image)

        quant = torch.trunc((-0.01 + sample) * args.quant_level)
        quant = quant / args.quant_level

        out_name = os.path.join(args.out, os.path.splitext(f)[0] + '.png')
        print(out_name)
        vutils.save_image(quant, out_name, normalize=True)
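After the Normalize above, pixel values lie in [-1, 1]; the -0.01 offset nudges exact boundary values down a bin before truncation. A sketch of the resulting levels for quant_level = 2:

import torch

sample = torch.linspace(-1, 1, 9)              # normalized pixel values
quant = torch.trunc((-0.01 + sample) * 2) / 2
print(quant)                                   # values land in {-1.0, -0.5, 0.0, 0.5}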