Example #1
    def __init__(self, child, dim=None):
        super().__init__()

        self.children = [child.grad_op]
        self.dim = dim  # Dimension (int) along which to sum

        if dim is None or len(child.size()) == 1:
            # The result is a scalar, so wrap it in a one-element tensor
            self.variable = lib.Variable(FloatTensor([child.tensor.sum()]),
                                         grad_op=self,
                                         is_leaf=False)
        else:
            self.variable = lib.Variable(child.tensor.sum(dim=dim),
                                         grad_op=self,
                                         is_leaf=False)
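
The two branches mirror how `Tensor.sum` behaves in torch: without a `dim` it collapses to a scalar, which is why the first branch wraps the result in a one-element tensor, while `sum(dim=...)` already returns a tensor. A quick standalone check (plain torch, outside the framework; `FloatTensor` is assumed to be `torch.FloatTensor`):

import torch

x = torch.FloatTensor([[1., 2.], [3., 4.]])
print(x.sum())       # tensor(10.) -- 0-dim, hence the FloatTensor([...]) wrapping above
print(x.sum(dim=0))  # tensor([4., 6.]) -- already a proper tensor, stored as-is
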
Example #2
    def __init__(self, child):
        super().__init__()

        self.children = [child.grad_op]
        # Forward pass: the transpose of the child's tensor
        self.variable = lib.Variable(child.tensor.t(),
                                     grad_op=self,
                                     is_leaf=False)
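
The matching backward rule is simply to transpose the upstream gradient. A quick sanity check with torch autograd (illustrative only, not part of the framework above):

import torch

x = torch.ones(2, 3, requires_grad=True)
x.t().sum().backward()
print(x.grad.size())  # torch.Size([2, 3]) -- the gradient comes back transposed to x's shape
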
Example #3
    def __init__(self, child, power):
        super().__init__()

        self.children = [child.grad_op]
        self.power = power  # Exponent, kept for the backward pass (power rule)
        self.variable = lib.Variable(child.tensor.pow(power),
                                     grad_op=self,
                                     is_leaf=False)
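
The backward pass for this op has to apply the power rule d(x^p)/dx = p * x^(p-1); plain torch autograd confirms the expected value:

import torch

x = torch.tensor(2.0, requires_grad=True)
x.pow(3).backward()
print(x.grad)  # tensor(12.) == 3 * 2.0 ** 2, the power rule the op's backward must apply
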
Example #4
    def __init__(self, child, dim, repetitions):
        super().__init__()

        self.dim = dim
        self.repetitions = repetitions

        # Repeat pattern: 1 everywhere except `repetitions` along `dim`;
        # unsqueeze() inserts the new axis that repeat() then tiles.
        dimensions = [1] * (len(child.size()) + 1)
        dimensions[self.dim] = self.repetitions

        self.children = [child.grad_op]
        self.variable = lib.Variable(child.tensor.unsqueeze(self.dim).repeat(*dimensions),
                                     grad_op=self,
                                     is_leaf=False)
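
Concretely, the unsqueeze/repeat pair tiles the child along one new axis, e.g. turning a length-3 vector into a (4, 3) matrix; in plain torch:

import torch

b = torch.FloatTensor([1., 2., 3.])
dims = [1] * (len(b.size()) + 1)     # [1, 1]
dims[0] = 4                          # 4 repetitions along the new leading dim
print(b.unsqueeze(0).repeat(*dims))  # 4 x 3 matrix, each row [1., 2., 3.]
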
Example #5
class Linear(Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()

        self.w = Variable(Tensor(out_dim, in_dim), name="w")
        self.b = Variable(Tensor(out_dim), name="b")
        self.__initParams()

    def __initParams(self):
        """
        Initialize the parameters with Xavier (Glorot) initialization:
        std = sqrt(2 / (fan_in + fan_out)).
        """
        std = math.sqrt(2.0 / (self.w.tensor.size(0) + self.w.tensor.size(1)))
        self.w.tensor.normal_(0, std)
        self.b.tensor.normal_(0, std)

    def forward(self, input):
        # y = x @ w^T + b, with b tiled along the batch dimension
        return input.mm(self.w.t()) + self.b.repeat(0, input.size(dim=0))

    def __repr__(self):
        return "Linear(in_dim={}, out_dim={})".format(self.w.tensor.size(1), self.w.tensor.size(0))
Example #6
if __name__ == "__main__":
    # Defining the parameters: sample count, learning rate, number of epochs
    N, eta, nb_epochs = 1000, 1e-1, 500

    # Generating input and target data for training and testing
    train_input, train_target = generate_data(N)
    validation_input, validation_target = generate_data(N)
    test_input, test_target = generate_data(N)

    # Normalize every split with the training set's statistics
    mean, std = train_input.mean(), train_input.std()

    train_input.sub_(mean).div_(std)
    validation_input.sub_(mean).div_(std)
    test_input.sub_(mean).div_(std)

    train_input = Variable(train_input, name="x")
    train_target = Variable(train_target, name="target")
    validation_input = Variable(validation_input)
    validation_target = Variable(validation_target)
    test_input = Variable(test_input)
    test_target = Variable(test_target)

    # Defining the model
    model = Sequential(Linear(2, 25), ReLU(), Linear(25, 25), ReLU(),
                       Linear(25, 25), ReLU(), Linear(25, 2), Softmax(dim=1))
    print("Model:", model)

    # Drawing the tree of gradient operations
    loss = LossMSE(model(train_input), train_target)
    loss.name = "MSE Loss"
    dot = loss.draw_graph()
    dot.render("plots/model.gv", view=True)
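
    # The script stops after rendering the graph, but the unused `eta` and
    # `nb_epochs` suggest a training loop along these lines. Hypothetical
    # sketch only: loss.backward() and a model.param() iterator of
    # (Variable, gradient tensor) pairs are assumptions about this
    # framework's interface, not shown in the excerpt.
    for epoch in range(nb_epochs):
        loss = LossMSE(model(train_input), train_target)
        loss.backward()
        for p, grad in model.param():
            p.tensor.sub_(eta * grad)  # plain SGD step with learning rate eta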
Example #7
    def __init__(self, a, b):
        super().__init__()

        self.children = [a.grad_op, b.grad_op]
        # Forward pass: element-wise product of the two parents
        self.variable = lib.Variable(a.tensor * b.tensor,
                                     grad_op=self,
                                     is_leaf=False)
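
Its backward pass will need the product rule: the gradient with respect to each factor is the upstream gradient times the other factor. Plain torch autograd illustrates the expected values:

import torch

a = torch.tensor(3.0, requires_grad=True)
b = torch.tensor(5.0, requires_grad=True)
(a * b).backward()
print(a.grad, b.grad)  # tensor(5.) tensor(3.) -- each factor's grad is the other factor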