Example #1
 def forward(self, input_tensor: Tensor_Torch):
     input_linked_tensor = input_tensor.get_linked_tensor()
     output_linked_tensor = input_linked_tensor.view(
         input_linked_tensor.shape[0], -1)
     if self.inplace_forward:
         input_tensor.set_linked_tensor(output_linked_tensor)
     return output_linked_tensor
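A minimal usage sketch for the flatten-style forward above. The enclosing class name and constructor are assumptions (a FlatTransform_Torch with an inplace_forward flag, inferred from the commented-out test_flat_transform() further down and from the other transforms in this listing), not confirmed API.

def test_flat_transform_sketch():
    flat_transform = FlatTransform_Torch(inplace_forward=True)  # hypothetical class / constructor
    image_tensor_input = Tensor_Torch(torch.randn(4, 3, 8, 8))  # batch of 4 fake images
    image_tensor_output = Tensor_Torch(flat_transform.forward(image_tensor_input))
    # with inplace_forward=True the input's linked tensor is replaced as well,
    # so both sizes print as torch.Size([4, 192])
    print(image_tensor_input.get_linked_tensor().size(),
          image_tensor_output.get_linked_tensor().size())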
Example #2
 def __init__(self,
              tensor_path: str,
              name: str = "TensorConstant_Torch",
              device=torch.device("cpu")):
     super(TensorConstant_Torch, self).__init__(name)
     self.linked_tensor_torch = Tensor_Torch(
         torch.load(tensor_path).to(device),
         name=self.name + "_saved_tensor")
Example #3
 def set_output_port(self, number: int):
     for i in range(number):
         self.outputMapping.append(
             Tensor_Torch(linked_tensor=None,
                          name=self.name + "_output_" + str(i + 1)))
         self.labelMapping.append(
             Tensor_Torch(linked_tensor=None,
                          name=self.name + "_label_" + str(i + 1)))
Example #4
 def __init__(self,
              view: list,
              name: str = "RandomeConstant_Torch",
              device=torch.device("cpu")):
     super(RandomConstant_Torch, self).__init__(name)
     self.linked_tensor_torch = Tensor_Torch(torch.randn(*view,
                                                         device=device),
                                             name=self.name +
                                             "_random_tensor")
Example #5
 def __init__(self,
              view: list,
              value: int,
              name: str = "ConstantConstant_Torch",
              device=torch.device("cpu")):
     super(ConstantConstant_Torch, self).__init__(name)
     self.linked_tensor_torch = Tensor_Torch(
         torch.add(torch.zeros(*view, device=device), value),
         name=self.name + "_const_tensor")
Example #6
 def forward(self, input_tensor: Tensor_Torch):
     self.mean = self.mean.to(input_tensor.get_device())
     self.std = self.std.to(input_tensor.get_device())
     input_linked_tensor = input_tensor.get_linked_tensor()
     if self.inplace_forward:
         input_linked_tensor.sub_(self.mean).div_(self.std)
         return input_linked_tensor
     else:
         output_linked_tensor = input_linked_tensor.sub(self.mean).div_(
             self.std)
         return output_linked_tensor
Example #7
def test_normal_transform():
    from viz_api.viz_pytorch_api.input import ImageConstant_Torch
    image_tensor_input = ImageConstant_Torch(
        image_path="../../static/img/boat.jpg",
        imsize=512,
        device=torch.device("cuda:0")).get_saved_tensor()
    normal_transform = NormalizeTransform_Torch(inplace_forward=True)
    image_tensor_output = Tensor_Torch(
        normal_transform.forward(image_tensor_input))
    print(image_tensor_input.get_linked_tensor().size(),
          image_tensor_output.get_linked_tensor().size())
    print(image_tensor_input.get_device(), image_tensor_output.get_device())
Example #8
 def __init__(self,
              image_path: str,
              imsize: int = 512,
              name: str = "ImageConstant_Torch",
              device=torch.device("cpu")):
     super(ImageConstant_Torch, self).__init__(name)
     loader = transforms.Compose(
         [transforms.Resize(imsize),
          transforms.ToTensor()])
     image = loader(Image.open(image_path, mode="r")).unsqueeze(0)
     self.linked_tensor_torch = Tensor_Torch(image.to(device, torch.float),
                                             name=self.name +
                                             "_image_tensor")
Example #9
def test_gram_matrix_transform():
    from viz_api.viz_pytorch_api.input import ImageConstant_Torch
    image_tensor_input = ImageConstant_Torch(
        image_path="../../static/img/boat.jpg",
        imsize=512,
        device=torch.device("cuda:0")).get_saved_tensor()
    gram_transform = GetGramMatrix_Torch(inplace_forward=True)
    gram_tensor_output = Tensor_Torch(
        gram_transform.forward(image_tensor_input))
    print(image_tensor_input.get_linked_tensor().size(),
          gram_tensor_output.get_linked_tensor().size())
    print(image_tensor_input.get_device(), gram_tensor_output.get_device())

    from viz_api.viz_pytorch_api.input import ImageConstant_Torch
    image_tensor_input = ImageConstant_Torch(
        image_path="../../static/img/boat.jpg",
        imsize=512,
        device=torch.device("cuda:0")).get_saved_tensor()
    gram_transform = GetGramMatrix_Torch(inplace_forward=False)
    gram_tensor_output = Tensor_Torch(
        gram_transform.forward(image_tensor_input))
    print(image_tensor_input.get_linked_tensor().size(),
          gram_tensor_output.get_linked_tensor().size())
    print(image_tensor_input.get_device(), gram_tensor_output.get_device())


#test_flat_transform()
#test_normal_transform()
#test_data_clamp_transform()
#test_detach_transform()
#test_add_transform()
#test_gram_matrix_transform()
Example #10
 def forward(self, input_tensor: Tensor_Torch):
     input_linked_tensor = input_tensor.get_linked_tensor()
     # reference: the PyTorch Neural Style Transfer tutorial
     a, b, c, d = input_linked_tensor.size()  # expects a 4-D tensor
     # a = batch size, b = number of feature maps
     # (c, d) = dimensions of a feature map (N = c * d)
     # reshape F_XL into \hat F_XL
     features = input_linked_tensor.view(a * b, c * d)
     G = torch.mm(features, features.t())  # compute the Gram product
     # 'normalize' the values of the Gram matrix
     # by dividing by the number of elements in each feature map
     output_linked_tensor = G.div(a * b * c * d)
     if self.inplace_forward:
         input_tensor.set_linked_tensor(output_linked_tensor)
     return output_linked_tensor
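The Gram computation above is plain tensor algebra, so it can be checked outside the viz_api wrappers. The sketch below (pure PyTorch) confirms that the torch.mm form matches an einsum formulation of G = F F^T / (a * b * c * d).

def gram_equivalence_check():
    import torch
    x = torch.randn(1, 3, 4, 4)
    a, b, c, d = x.size()
    features = x.view(a * b, c * d)
    gram_mm = torch.mm(features, features.t()).div(a * b * c * d)
    gram_einsum = torch.einsum("ik,jk->ij", features, features).div(a * b * c * d)
    print(torch.allclose(gram_mm, gram_einsum))  # True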
Example #11
def test_logsoftmax():
    device = torch.device("cuda:0")
    m = LogSoftmax_Torch(dim=1, device=device, inplace_forward=True)
    input = Tensor_Torch(torch.randn(2, 3, device=device))
    output = Tensor_Torch(m.forward(input))

    print(input.get_device(), m.get_device(), output.get_device())
    print(input.name, "---", input.get_self_memory_size(), "---",
          input.get_grad_memory_size())
    print(m.name, "---", m.get_feature_memory_size(), "---",
          m.get_grad_memory_size())
    print(torch.eq(input.get_linked_tensor(), output.get_linked_tensor()))


#test_logsoftmax()
Example #12
def test_layer_node():
    import torch
    from viz_api.viz_pytorch_api import layer
    device = torch.device("cuda:0")

    GeneratedDict = {
        "in_features": 128,
        "out_features": 10,
        "inplace_forward": False,
        "import_layer": None,
        "name": "Linear",
        "bias": True,
        "device": device
    }
    linear1 = LayerNode_Torch(layer.Linear_Torch, GenerateDict=GeneratedDict)
    linear1.set_output_port(2)

    test_rand = Tensor_Torch(torch.randn(10, 128).to(device))
    linear1.forward([test_rand])
    for i in range(len(linear1.outputMapping)):
        outputTensor = linear1.outputMapping[i]
        print(outputTensor.name, id(outputTensor.get_linked_tensor()),
              outputTensor.get_linked_tensor().size())
    print(linear1.get_tensor_memory_size(),
          linear1.get_tensor_grad_memory_size())
    print(linear1.get_layer_feature_memory_size(),
          linear1.get_layer_grad_memory_size())
Example #13
 def forward(self, input_tensor: Tensor_Torch):
     input_linked_tensor = input_tensor.get_linked_tensor()
     if self.inplace_forward:
         input_linked_tensor.detach_()
         return input_linked_tensor
     else:
         output_linked_tensor = input_linked_tensor.detach()
         return output_linked_tensor
Example #14
 def __init__(self,
              tensor_path: str,
              name: str = "TensorLoader_Torch",
              device=torch.device("cpu")):
     super(TensorLoader_Torch, self).__init__(name)
     self.linked_tensor_group = list()
     self.linked_tensor_group.append(
         Tensor_Torch(torch.load(tensor_path).to(device),
                      name=self.name + "_saved_tensor_1"))
Example #15
 def __init__(self,
              view: list,
              name: str = "RandomeLoader_Torch",
              device=torch.device("cpu")):
     super(RandomLoader_Torch, self).__init__(name)
     self.linked_tensor_group = list()
     self.linked_tensor_group.append(
         Tensor_Torch(torch.randn(*view, device=device),
                      name=self.name + "_random_tensor_1"))
Example #16
 def __init__(self,
              view: list,
              value: int,
              name: str = "ConstantLoader_Torch",
              device=torch.device("cpu")):
     super(ConstantLoader_Torch, self).__init__(name)
     self.linked_tensor_group = list()
     self.linked_tensor_group.append(
         Tensor_Torch(torch.add(torch.zeros(*view, device=device), value),
                      name=self.name + "_const_tensor_1"))
Example #17
 def forward(self, input_tensor: Tensor_Torch):
     input_linked_tensor = input_tensor.get_linked_tensor()
     if self.inplace_forward:
         input_linked_tensor.clamp_(self.clamp_range[0],
                                    self.clamp_range[1])
         return input_linked_tensor
     else:
         output_linked_tensor = input_linked_tensor.clamp(
             self.clamp_range[0], self.clamp_range[1])
         return output_linked_tensor
Example #18
def test_transform_node():
    import torch
    from viz_api.viz_pytorch_api import transform
    device = torch.device("cuda:0")

    GeneratedDict = {"inplace_forward": True, "name": "Add1"}
    one_tensor_input_1 = Tensor_Torch(torch.ones(1, 1).to(device))
    one_tensor_input_2 = Tensor_Torch(torch.ones(1, 1).to(device))
    one_tensor_input_3 = Tensor_Torch(torch.ones(1, 1).to(device))
    add1 = TransformNode_Torch(transform.AddTransform_Torch,
                               GenerateDict=GeneratedDict)
    add1.set_output_port(2)

    add1.forward([one_tensor_input_1, one_tensor_input_2, one_tensor_input_3])
    print(one_tensor_input_1.get_linked_tensor())
    for i in range(len(add1.outputMapping)):
        outputTensor = add1.outputMapping[i]
        print(outputTensor.name, id(outputTensor.get_linked_tensor()),
              outputTensor.get_linked_tensor())

    GeneratedDict = {"inplace_forward": False, "name": "Add1"}
    one_tensor_input_1 = Tensor_Torch(torch.ones(1, 1).to(device))
    one_tensor_input_2 = Tensor_Torch(torch.ones(1, 1).to(device))
    one_tensor_input_3 = Tensor_Torch(torch.ones(1, 1).to(device))
    add1 = TransformNode_Torch(transform.AddTransform_Torch,
                               GenerateDict=GeneratedDict)
    add1.set_output_port(2)

    add1.forward([one_tensor_input_1, one_tensor_input_2, one_tensor_input_3])
    print(one_tensor_input_1.get_linked_tensor())
    for i in range(len(add1.outputMapping)):
        outputTensor = add1.outputMapping[i]
        print(outputTensor.name, id(outputTensor.get_linked_tensor()),
              outputTensor.get_linked_tensor())


#test_layer_node()
#test_transform_node()
Example #19
 def __init__(self,
              root: str,
              max_batch_size: int = 1,
              shuffle: bool = False,
              train: bool = True,
              download: bool = False,
              name: str = "MnistDataSetLoader_Torch",
              device=torch.device("cpu")):
     super(MnistDataSetLoader_Torch, self).__init__(name)
     # standard load procedure for MNIST dataset
     mnist_data = MNIST(root,
                        train=train,
                        download=download,
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307, ), (0.3081, ))
                        ]))
     # MNIST has 60,000 training and 10,000 test images; the batch size should
     # divide that count evenly
     batch_size = max_batch_size
     image_number = 60000 if train else 10000
     for i in range(max_batch_size, 0, -1):
         if image_number % i == 0:
             batch_size = i
             break
     # load the data in batches; the DataLoader is iterable
     mnist_data_loader = DataLoader(mnist_data,
                                    batch_size=batch_size,
                                    shuffle=shuffle)
     # transfer the data to expected device, pack into Tensor_Torch
     self.linked_tensor_group_img = list()
     self.linked_tensor_group_label = list()
     for i, (images, labels) in enumerate(mnist_data_loader):
         self.linked_tensor_group_img.append(
             Tensor_Torch(images.to(device),
                          name=self.name + "_img_tensor_" + str(i + 1)))
         self.linked_tensor_group_label.append(
             Tensor_Torch(labels.to(device),
                          name=self.name + "_label_tensor_" + str(i + 1)))
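The batch-size selection above can be read in isolation: walk down from max_batch_size until a value is found that divides the image count evenly, so every batch has the same shape. A standalone sketch of that loop:

def choose_batch_size(max_batch_size: int, image_number: int) -> int:
    # walk down from the requested size until a divisor of image_number is found;
    # i = 1 always divides, so the loop is guaranteed to return
    for i in range(max_batch_size, 0, -1):
        if image_number % i == 0:
            return i
    return 1  # only reached if max_batch_size < 1


print(choose_batch_size(64, 60000))  # 60
print(choose_batch_size(64, 10000))  # 50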
Example #20
class ImageConstant_Torch(input.ImageConstant):
    def __init__(self,
                 image_path: str,
                 imsize: int = 512,
                 name: str = "ImageConstant_Torch",
                 device=torch.device("cpu")):
        super(ImageConstant_Torch, self).__init__(name)
        loader = transforms.Compose(
            [transforms.Resize(imsize),
             transforms.ToTensor()])
        image = loader(Image.open(image_path, mode="r")).unsqueeze(0)
        self.linked_tensor_torch = Tensor_Torch(image.to(device, torch.float),
                                                name=self.name +
                                                "_image_tensor")

    def get_saved_tensor(self):
        return self.linked_tensor_torch

    def set_device(self, device: torch.device):
        self.linked_tensor_torch.set_device(device=device)

    def get_device(self):
        return self.linked_tensor_torch.get_device()

    # return the memory usage of the loaded tensor, in KB
    def get_tensor_memory_size(self):
        return self.linked_tensor_torch.get_self_memory_size()

    # return the memory usage of the loaded tensor's gradient, in KB
    def get_tensor_grad_memory_size(self):
        return self.linked_tensor_torch.get_grad_memory_size()

    def remove_from_tracking_gradient(self):
        return self.linked_tensor_torch.remove_from_tracking_gradient()

    def start_tracking_gradient(self):
        return self.linked_tensor_torch.start_tracking_gradient()

    @staticmethod
    def get_description():
        return "Loader for single image"
Example #21
class ConstantConstant_Torch(input.ConstantConstant):
    def __init__(self,
                 view: list,
                 value: int,
                 name: str = "ConstantConstant_Torch",
                 device=torch.device("cpu")):
        super(ConstantConstant_Torch, self).__init__(name)
        self.linked_tensor_torch = Tensor_Torch(
            torch.add(torch.zeros(*view, device=device), value),
            name=self.name + "_const_tensor")

    def get_saved_tensor(self):
        return self.linked_tensor_torch

    def set_device(self, device: torch.device):
        self.linked_tensor_torch.set_device(device=device)

    def get_device(self):
        return self.linked_tensor_torch.get_device()

    # return the memory usage of the loaded tensor, in KB
    def get_tensor_memory_size(self):
        return self.linked_tensor_torch.get_self_memory_size()

    # return the memory usage of the loaded tensor's gradient, in KB
    def get_tensor_grad_memory_size(self):
        return self.linked_tensor_torch.get_grad_memory_size()

    def remove_from_tracking_gradient(self):
        return self.linked_tensor_torch.remove_from_tracking_gradient()

    def start_tracking_gradient(self):
        return self.linked_tensor_torch.start_tracking_gradient()

    @staticmethod
    def get_description():
        return "Constant tensor constant (1, 0)"
Example #22
class TensorConstant_Torch(input.TensorConstant):
    def __init__(self,
                 tensor_path: str,
                 name: str = "TensorConstant_Torch",
                 device=torch.device("cpu")):
        super(TensorConstant_Torch, self).__init__(name)
        self.linked_tensor_torch = Tensor_Torch(
            torch.load(tensor_path).to(device),
            name=self.name + "_saved_tensor")

    def get_saved_tensor(self):
        return self.linked_tensor_torch

    def set_device(self, device: torch.device):
        self.linked_tensor_torch.set_device(device=device)

    def get_device(self):
        return self.linked_tensor_torch.get_device()

    # return the memory usage of the loaded tensor, in KB
    def get_tensor_memory_size(self):
        return self.linked_tensor_torch.get_self_memory_size()

    # return the memory usage of the loaded tensor's gradient, in KB
    def get_tensor_grad_memory_size(self):
        return self.linked_tensor_torch.get_grad_memory_size()

    def remove_from_tracking_gradient(self):
        return self.linked_tensor_torch.remove_from_tracking_gradient()

    def start_tracking_gradient(self):
        return self.linked_tensor_torch.start_tracking_gradient()

    @staticmethod
    def get_description():
        return "Constant Tensor constant"
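A minimal usage sketch for TensorConstant_Torch. The file path below is made up for illustration; the only assumption is that the file was written with torch.save, which is what the torch.load call in the constructor expects.

def test_tensor_constant_sketch():
    import torch
    torch.save(torch.randn(3, 4), "saved_tensor.pt")  # hypothetical path
    tensor_constant = TensorConstant_Torch(tensor_path="saved_tensor.pt")
    saved_tensor = tensor_constant.get_saved_tensor()
    print(saved_tensor.get_linked_tensor().size())  # torch.Size([3, 4])
    print(tensor_constant.get_device(), tensor_constant.get_tensor_memory_size())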
Example #23
 def forward(self, input_tensor: Tensor_Torch, target_tensor: Tensor_Torch):
     return self.nllloss(input_tensor.get_linked_tensor(),
                         target_tensor.get_linked_tensor())
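Assuming self.nllloss wraps torch.nn.NLLLoss (the snippet itself does not show the constructor), the call it delegates to looks like this in plain PyTorch: the first argument holds log-probabilities (for example the output of LogSoftmax), the second holds integer class indices.

def nll_loss_sketch():
    import torch
    import torch.nn as nn
    log_probs = nn.LogSoftmax(dim=1)(torch.randn(4, 10))  # batch of 4, 10 classes
    targets = torch.randint(0, 10, (4,))                  # integer class labels
    print(nn.NLLLoss()(log_probs, targets).item())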
Example #24
 def forward(self, input_tensor: Tensor_Torch):
     # ReLU is always applied out-of-place here (never an in-place update)
     return self.relu(input_tensor.get_linked_tensor())
Example #25
 def forward(self, input_tensor: Tensor_Torch):
     linked_tensor = self.linear(input_tensor.get_linked_tensor())
     if self.inplace_forward:
         input_tensor.set_linked_tensor(linked_tensor)
     return linked_tensor
Example #26
def test_data_clamp_transform():
    rand_tensor_input = Tensor_Torch(torch.randn(3, 2).add(5))
    clamp_transform = DataClampTransform_Torch((0, 1), inplace_forward=True)
    rand_tensor_output = Tensor_Torch(
        clamp_transform.forward(rand_tensor_input))
    print(rand_tensor_output.get_linked_tensor())
    print(
        torch.eq(rand_tensor_input.get_linked_tensor(),
                 rand_tensor_output.get_linked_tensor()))

    rand_tensor_input = Tensor_Torch(torch.randn(3, 2).add(5))
    clamp_transform = DataClampTransform_Torch((0, 1), inplace_forward=False)
    rand_tensor_output = Tensor_Torch(
        clamp_transform.forward(rand_tensor_input))
    print(rand_tensor_output.get_linked_tensor())
    print(
        torch.eq(rand_tensor_input.get_linked_tensor(),
                 rand_tensor_output.get_linked_tensor()))
Example #27
def test_detach_transform():
    rand_tensor_input = Tensor_Torch(torch.randn(3, 2).requires_grad_(True))
    detach_transform = DetachTransform_Torch(inplace_forward=True)
    rand_tensor_output = Tensor_Torch(
        detach_transform.forward(rand_tensor_input))
    print(rand_tensor_input.get_linked_tensor().requires_grad,
          rand_tensor_output.get_linked_tensor().requires_grad)

    rand_tensor_input = Tensor_Torch(torch.randn(3, 2).requires_grad_(True))
    detach_transform = DetachTransform_Torch(inplace_forward=False)
    rand_tensor_output = Tensor_Torch(
        detach_transform.forward(rand_tensor_input))
    print(rand_tensor_input.get_linked_tensor().requires_grad,
          rand_tensor_output.get_linked_tensor().requires_grad)
Example #28
def test_add_transform():
    one_tensor_input_1 = Tensor_Torch(torch.ones(1, 1))
    one_tensor_input_2 = Tensor_Torch(torch.ones(1, 1))
    one_tensor_input_3 = Tensor_Torch(torch.ones(1, 1))
    add_transform = AddTransform_Torch(inplace_forward=True)
    one_tensor_output = Tensor_Torch(
        add_transform.forward(one_tensor_input_1, one_tensor_input_2,
                              one_tensor_input_3))
    print(one_tensor_input_1.get_linked_tensor(),
          one_tensor_input_2.get_linked_tensor(),
          one_tensor_input_3.get_linked_tensor(),
          one_tensor_output.get_linked_tensor())

    one_tensor_input_1 = Tensor_Torch(torch.ones(1, 1))
    one_tensor_input_2 = Tensor_Torch(torch.ones(1, 1))
    one_tensor_input_3 = Tensor_Torch(torch.ones(1, 1))
    add_transform = AddTransform_Torch(inplace_forward=False)
    one_tensor_output = Tensor_Torch(
        add_transform.forward(one_tensor_input_1, one_tensor_input_2,
                              one_tensor_input_3))
    print(one_tensor_input_1.get_linked_tensor(),
          one_tensor_input_2.get_linked_tensor(),
          one_tensor_input_3.get_linked_tensor(),
          one_tensor_output.get_linked_tensor())