Example No. 1
    def dechunk_block_tensor_concatenated_along_batch_dimension(
            self, tensor: torch.Tensor):
        number_of_examples = int(
            tensor.size(0) / self.number_of_feature_blocks_per_example)

        # print(">>> dechunk_block_tensor_concatenated_along_batch_dimension: - tensor.grad_fn "
        #      + str(tensor.grad_fn))

        # print("tensor.size(): " + str(tensor.size()))
        channels = tensor.size(1)

        tensor_grouped_by_block = tensor.view(
            self.number_of_feature_blocks_per_example, number_of_examples,
            channels, self.block_size.height, self.block_size.width)

        tensor_block_row = self.reconstruct_tensor_block_row(
            tensor_grouped_by_block, 0)
        result = tensor_block_row

        for row_index in range(1, self.blocks_per_column):
            tensor_block_row = self.reconstruct_tensor_block_row(
                tensor_grouped_by_block, row_index)
            result = torch.cat((result, tensor_block_row), 2)

        # print(">>> dechunk_block_tensor_concatenated_along_batch_dimension: - result.grad_fn "
        #      + str(result.grad_fn))

        return result
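The method above reassembles blocks that were stacked along the batch dimension by first viewing them as (blocks, examples, channels, block_height, block_width) and then concatenating the block rows along the height dimension. A minimal standalone roundtrip sketch with toy shapes (not part of the original class) illustrating the same view-then-cat reconstruction:

import torch

batch, channels, height, width = 2, 3, 4, 6
block_h, block_w = 2, 3
blocks_per_column = height // block_h                     # 2
blocks_per_row = width // block_w                         # 2
blocks_per_example = blocks_per_column * blocks_per_row   # 4

original = torch.arange(batch * channels * height * width,
                        dtype=torch.float32).view(batch, channels, height, width)

# Chunk: split on height, then on width, and stack all blocks along dim 0
blocks = []
for row_strip in torch.split(original, block_h, dim=2):
    for block in torch.split(row_strip, block_w, dim=3):
        blocks.append(block)
chunked = torch.cat(blocks, dim=0)   # (blocks_per_example * batch, C, block_h, block_w)

# Dechunk: group by block, rebuild each block row by concatenating along the
# width (dim 3), then concatenate the rows along the height (dim 2)
grouped = chunked.view(blocks_per_example, batch, channels, block_h, block_w)
rows = []
for row_index in range(blocks_per_column):
    row_blocks = [grouped[row_index * blocks_per_row + column]
                  for column in range(blocks_per_row)]
    rows.append(torch.cat(row_blocks, dim=3))
reconstructed = torch.cat(rows, dim=2)
print(torch.equal(reconstructed, original))   # True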
Example No. 2
    def dechunk_block_tensor_concatenated_along_batch_dimension_breaks_gradient(
            self, tensor: torch.Tensor):
        number_of_examples = int(
            tensor.size(0) / self.number_of_feature_blocks_per_example)

        # print(">>> dechunk_block_tensor_concatenated_along_batch_dimension: - tensor.grad_fn "
        #       + str(tensor.grad_fn))

        # print("tensor.size(): " + str(tensor.size()))
        channels = tensor.size(1)

        tensor_grouped_by_block = tensor.view(
            self.number_of_feature_blocks_per_example, number_of_examples,
            channels, self.block_size.height, self.block_size.width)

        result = torch.zeros(number_of_examples, channels,
                             self.original_size.height,
                             self.original_size.width)

        # print("tensor.nelement(): " + str(tensor.nelement()))
        # print("resuls.nelement(): " + str(result.nelement()))
        if Utils.use_cuda():
            # https://discuss.pytorch.org/t/which-device-is-model-tensor-stored-on/4908/7
            device = tensor.get_device()
            result = result.to(device)

        # print("tensor_grouped_by_block.size(): " + str(tensor_grouped_by_block.size()))
        for block_index in range(0, tensor_grouped_by_block.size(0)):
            # print("i: " + str(block_index))

            height_span_begin, height_span_end = self.height_span(block_index)
            width_span_begin, width_span_end = self.width_span(block_index)
            # print("height_span: " + str(height_span_begin) + ":"  + str(height_span_end))
            # print("width_span: " + str(width_span_begin) + ":" + str(width_span_end))

            # print("tensor_grouped_by_block[block_index, :, :, :]:" + str(
            #    tensor_grouped_by_block[block_index, :, :, :]))

            # Fixme: possibly copying like this destroys the gradient, as the grad_fn function of result
            # shows" result.grad_fn <CopySlices object at 0x7f211cbfa208>
            # instead of something like "<TanhBackward object" , "<CatBackward object"...
            # Probably "cat" should be used to reconstruct the original configuration
            # row by row. This was used previously also in the "extract_unskewed_activations"
            # function
            result[:, :, height_span_begin:height_span_end,
                   width_span_begin:width_span_end] = \
                tensor_grouped_by_block[block_index, :, :, :]

        # print(">>> dechunk_block_tensor_concatenated_along_batch_dimension: - result.grad_fn "
        #      + str(result.grad_fn))

        return result
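The Fixme comment above worries that copying blocks into a preallocated result via slice assignment might break the gradient, since result.grad_fn becomes a CopySlices node. A small standalone check (toy shapes, not from the original class) makes this easy to inspect: whether gradients still reach the source tensor after such an assignment can be verified with a backward pass:

import torch

source = torch.randn(2, 3, requires_grad=True)
result = torch.zeros(2, 3)
result[:, :] = source * 2      # in-place copy into slices of a preallocated tensor
print(result.grad_fn)          # a CopySlices node, as noted in the Fixme above
result.sum().backward()
print(source.grad)             # non-None here means the graph is still connected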
Example No. 3
    def chunk_tensor_list_into_blocks_concatenate_along_batch_same_height_groups(
            self, tensor_list: list):
        current_same_height_tensors_height = tensor_list[0].size(1)
        same_height_tensors = list([])

        cat_list = list([])

        for tensor in tensor_list:
            height = tensor.size(1)
            if height != current_same_height_tensors_height:
                # Flush the completed group and start a new group for the new height
                same_height_tensor_blocks_tensor = self.\
                    chunk_tensor_list_into_blocks_concatenate_along_batch_dimension_cat_once_fast(
                        same_height_tensors)
                cat_list.append(same_height_tensor_blocks_tensor)
                same_height_tensors = list([])
                # Track the new group's height so following tensors of the same
                # height are appended to this group rather than each starting a new one
                current_same_height_tensors_height = height

            same_height_tensors.append(tensor)

        # Add last element
        same_height_tensor_blocks_tensor = self. \
            chunk_tensor_list_into_blocks_concatenate_along_batch_dimension_cat_once_fast(
                same_height_tensors)
        cat_list.append(same_height_tensor_blocks_tensor)

        return torch.cat(cat_list, 0)
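The grouping above collects consecutive runs of equal-height tensors before chunking each run in one go. A standalone sketch of the same consecutive-run idea using itertools.groupby (toy tensors, not the original class):

import itertools
import torch

tensor_list = [torch.zeros(1, 4, 8), torch.zeros(1, 4, 6), torch.zeros(1, 8, 8)]
runs = [list(group) for _, group
        in itertools.groupby(tensor_list, key=lambda t: t.size(1))]
print([len(run) for run in runs])   # [2, 1]: one run of height 4, one of height 8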
Example No. 4
    def chunk_tensor_into_blocks_concatenate_along_batch_dimension_no_cat(
            self, tensor: torch.Tensor):

        tensor_split_on_height = torch.split(tensor, self.block_size.height, 2)

        # New implementation: completely without use of cat
        # https://discuss.pytorch.org/t/best-way-to-split-process-merge/18702
        total_blocks = self.blocks_per_column * self.blocks_per_row
        batch_size = tensor.size(0)
        # The size of the batch dimension must be large enough to hold all the
        # blocks of all the examples stacked along it: total_blocks * batch_size
        height_in_batch_dimension = total_blocks * batch_size
        # print("height in batch dimension: " + str(height_in_batch_dimension))

        if Utils.use_cuda():
            device = tensor.get_device()
            with torch.cuda.device(device):
                # creating the zeros directly on the gpu, which is faster
                # See: https://discuss.pytorch.org/t/creating-tensors-on-gpu-directly/2714/5
                result = torch.cuda.FloatTensor(height_in_batch_dimension,
                                                tensor.size(1),
                                                self.block_size.height,
                                                self.block_size.width).fill_(0)
        else:
            result = torch.FloatTensor(height_in_batch_dimension,
                                       tensor.size(1), self.block_size.height,
                                       self.block_size.width).fill_(0)
        index = 0
        for row_block in tensor_split_on_height:
            blocks = torch.split(row_block, self.block_size.width, 3)
            for column_block in blocks:
                # print("column_block.size(): " + str(column_block.size()))
                # print("result.size(): " + str(result.size()))
                # print("result slice.size() : " +
                #      str(result[index * batch_size:((index + 1) * batch_size),
                #                 :, :, :].size())
                #      )
                # https://discuss.pytorch.org/t/best-way-to-split-process-merge/18702
                result[index * batch_size:((index + 1) *
                                           batch_size), :, :, :] = column_block
                # Advance to the next block slot in the batch dimension
                index += 1
        return result
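The two torch.split calls above cut the input into row strips of block_size.height and then into blocks of block_size.width, and each block is written into its own slot of size batch_size along dimension 0. A small sketch with toy shapes (not the original class) showing what the splits produce and why the result needs total_blocks * batch_size rows:

import torch

tensor = torch.randn(2, 3, 4, 6)                  # (batch, channels, height, width)
block_h, block_w = 2, 3
row_strips = torch.split(tensor, block_h, dim=2)  # strips of height block_h
blocks = [block for strip in row_strips
          for block in torch.split(strip, block_w, dim=3)]
print(len(blocks), blocks[0].size())              # 4 blocks, each (2, 3, 2, 3)
stacked = torch.cat(blocks, dim=0)
print(stacked.size(0) == len(blocks) * tensor.size(0))   # True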
Example No. 5
    def dechunk_block_tensor_concatenated_along_batch_dimension_changed_block_size(
            self, tensor: torch.Tensor, block_size: SizeTwoDimensional):
        time_start = util.timing.date_time_now()

        number_of_examples = len(self.original_sizes)

        # print(">>> dechunk_block_tensor_concatenated_along_batch_dimension: - tensor.grad_fn "
        #      + str(tensor.grad_fn))

        # print("tensor.size(): " + str(tensor.size()))
        channels = tensor.size(1)

        result = list([])

        blocks_per_column_list, blocks_per_row_list, blocks_for_examples_list = \
            self.get_number_of_blocks_for_examples()

        # print("Total blocks for examples: " + str(sum(blocks_for_examples_list)))
        # print("tensor.size(): " + str(tensor.size()))
        example_sub_tensors = torch.split(tensor, blocks_for_examples_list, 0)

        blocks_start_index = 0
        for example_index in range(0, number_of_examples):
            blocks_per_column = blocks_per_column_list[example_index]
            # blocks_per_row = blocks_per_row_list[example_index]
            blocks_for_example = blocks_for_examples_list[example_index]
            example_sub_tensor = example_sub_tensors[example_index]
            # print("example_sub_tensor: " + str(example_sub_tensor))

            tensor_grouped_by_block = example_sub_tensor.view(
                blocks_for_example, channels, block_size.height,
                block_size.width)
            # reconstructed_example_tensor = TensorListChunking.reconstruct_tensor_row_by_row(tensor_grouped_by_block,
            #                                                                                 blocks_per_column,
            #                                                                                 blocks_per_row)
            reconstructed_example_tensor = TensorListChunking.reconstruct_tensor_cat_split_cat(
                tensor_grouped_by_block, blocks_per_column)
            result.append(reconstructed_example_tensor)

            # print(">>> dechunk_block_tensor_concatenated_along_batch_dimension: - result.grad_fn "
            #      + str(result.grad_fn))

            blocks_start_index += blocks_for_example

        # print("dechunk_block_tensor_concatenated_along_batch_dimension_changed_block_size - time used: \n" +
        #      str(util.timing.milliseconds_since(time_start)) + " milliseconds.")

        return result
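The key step above is torch.split with a list of sizes, which cuts the block-stacked batch dimension into one sub-tensor per example, each holding exactly that example's blocks. A standalone sketch with toy values (not the original class):

import torch

channels, block_h, block_w = 3, 2, 2
blocks_for_examples_list = [4, 6, 2]    # blocks per example, 12 blocks in total
tensor = torch.randn(sum(blocks_for_examples_list), channels, block_h, block_w)
example_sub_tensors = torch.split(tensor, blocks_for_examples_list, 0)
print([sub.size(0) for sub in example_sub_tensors])   # [4, 6, 2]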
Example No. 6
    @staticmethod
    def group_examples_by_height(tensor_list):

        dictionary = OrderedDict([])
        for index, tensor in enumerate(tensor_list):
            height = tensor.size(1)
            if height in dictionary:
                same_height_list, original_indices_for_height_list = dictionary[
                    height]
                same_height_list.append(tensor)
                original_indices_for_height_list.append(index)
            else:
                tensor_indices_tuple = list([tensor]), list([index])
                dictionary[height] = tensor_indices_tuple

        reordered_elements_list = list([])
        original_indices = list([])

        for height in dictionary.keys():
            same_height_list, original_indices_for_height_list = dictionary[
                height]
            reordered_elements_list.extend(same_height_list)
            original_indices.extend(original_indices_for_height_list)

        return reordered_elements_list, original_indices
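The function returns the tensors reordered so that equal heights are adjacent, plus the original index of each reordered tensor, so the caller can map results back to the input order. A small sketch (toy tensors; the returned values are assumed here rather than computed by the method) of how original_indices restores the original order:

import torch

tensor_list = [torch.zeros(1, 32, 5), torch.zeros(1, 64, 7), torch.zeros(1, 32, 9)]
# Assume the height-32 tensors were grouped first, then the height-64 tensor:
reordered_elements_list = [tensor_list[0], tensor_list[2], tensor_list[1]]
original_indices = [0, 2, 1]

restored = [None] * len(tensor_list)
for position, original_index in enumerate(original_indices):
    restored[original_index] = reordered_elements_list[position]
print(all(restored[i] is tensor_list[i] for i in range(len(tensor_list))))   # True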
Example No. 7
    def __init__(self, tensors, transform=None):

        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
        self.tensors = tensors
        self.transform = transform
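This constructor stores several equally sized tensors plus an optional transform. A minimal sketch of how such a dataset is usually completed (the class name CustomTensorDataset and the __getitem__/__len__ bodies below are assumptions, not shown in the original example):

import torch
from torch.utils.data import Dataset

class CustomTensorDataset(Dataset):
    def __init__(self, tensors, transform=None):
        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors)
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        x = self.tensors[0][index]
        if self.transform is not None:
            x = self.transform(x)    # transform applied to the input tensor only
        y = self.tensors[1][index]
        return x, y

    def __len__(self):
        return self.tensors[0].size(0)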
Example No. 8
def getSparsity(tensor):
    tensor = tensor.flatten()
    sparsity = float((tensor == 0).sum()) / int(tensor.size(0))
    return sparsity
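A quick usage check with a known answer: a tensor with 4 zeros out of 10 elements has sparsity 0.4.

import torch

t = torch.tensor([0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 4.0, 5.0, 6.0])
print(getSparsity(t))   # 0.4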
Example No. 9
    def tensor_block_height(self, tensor):
        return int(tensor.size(1) / self.block_size.height)

    def tensor_block_width(self, tensor):
        return int(tensor.size(2) / self.block_size.width)
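Both helpers above divide a feature-map dimension by the corresponding block dimension to get the number of blocks. A standalone illustration with toy sizes (self.block_size comes from the surrounding class; plain numbers are used here instead):

import torch

tensor = torch.randn(3, 8, 12)              # (channels, height, width), as indexed above
block_height, block_width = 4, 3
print(int(tensor.size(1) / block_height))   # 2 blocks per column
print(int(tensor.size(2) / block_width))    # 4 blocks per row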