Example #1
 def __init__(self,
              num_features: int,
              eps: float = 1e-5,
              momentum: float = 0.1,
              affine: bool = True,
              track_running_stats: bool = True) -> None:
     super(_NormBase, self).__init__()
     self.num_features = num_features
     self.eps = eps
     self.momentum = momentum
     self.affine = affine
     self.track_running_stats = track_running_stats
     if self.affine:
         self.weight = Parameter(torch.Tensor(num_features))
         self.bias = Parameter(torch.Tensor(num_features))
     else:
         self.register_parameter('weight', None)
         self.register_parameter('bias', None)
     if self.track_running_stats:
         self.register_buffer('running_mean', torch.zeros(num_features))
         self.register_buffer('running_var', torch.ones(num_features))
         self.register_buffer('num_batches_tracked',
                              torch.tensor(0, dtype=torch.long))
     else:
         self.register_parameter('running_mean', None)
         self.register_parameter('running_var', None)
         self.register_parameter('num_batches_tracked', None)
     self.reset_parameters()
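A minimal usage sketch of the initializer above, assuming the stock torch.nn.BatchNorm1d, which inherits it via _NormBase: affine=True registers weight/bias parameters, and track_running_stats=True registers the running_mean, running_var and num_batches_tracked buffers.

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(8, affine=True, track_running_stats=True)
print(bn.weight.shape, bn.bias.shape)               # learnable affine parameters, each torch.Size([8])
print(bn.running_mean.shape, bn.running_var.shape)  # buffers updated during training
print(bn.num_batches_tracked)                       # tensor(0), incremented on each training-mode forward
x = torch.randn(4, 8)
y = bn(x)                                           # normalizes over the batch dimension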
Example #2
 def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
     self.check_forward_input(input)
     if hx is None:
         hx = torch.zeros(input.size(0),
                          self.hidden_size,
                          dtype=input.dtype,
                          device=input.device)
     self.check_forward_hidden(input, hx, '')
     if self.nonlinearity == "tanh":
         ret = _VF.rnn_tanh_cell(
             input,
             hx,
             self.weight_ih,
             self.weight_hh,
             self.bias_ih,
             self.bias_hh,
         )
     elif self.nonlinearity == "relu":
         ret = _VF.rnn_relu_cell(
             input,
             hx,
             self.weight_ih,
             self.weight_hh,
             self.bias_ih,
             self.bias_hh,
         )
     else:
         ret = input  # TODO: remove when jit supports exception flow
         raise RuntimeError("Unknown nonlinearity: {}".format(
             self.nonlinearity))
     return ret
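A short sketch of calling this forward through torch.nn.RNNCell; when hx is omitted it is created as zeros, exactly as in the branch above.

import torch
import torch.nn as nn

cell = nn.RNNCell(input_size=10, hidden_size=20, nonlinearity='tanh')
x = torch.randn(3, 10)          # batch of 3
h1 = cell(x)                    # hx defaults to zeros of shape (3, 20)
h2 = cell(x, h1)                # or pass the previous hidden state explicitly
print(h2.shape)                 # torch.Size([3, 20])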
Example #3
 def __init__(self,
              in_features: int,
              out_features: int,
              bias: bool = True,
              activation=None) -> None:
     super(Linear, self).__init__()
     self.in_features = in_features
     self.out_features = out_features
     self.weight = Parameter(torch.zeros(out_features, in_features))
     if bias:
         self.bias = Parameter(torch.zeros(out_features))
     else:
         self.register_parameter('bias', None)
     # print(self.weight)
     self.reset_parameters()
     # print(self.weight)
     self.activation = activation
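A hedged usage sketch of this custom Linear (note: not torch.nn.Linear, since it takes an activation argument and zero-initializes the weight before reset_parameters). The forward method is not shown, so applying the activation to the affine output is an assumption about that behaviour.

import torch

# Hypothetical usage; assumes forward() applies `activation` to the affine output.
layer = Linear(in_features=4, out_features=2, bias=True, activation=torch.relu)
x = torch.randn(5, 4)
y = layer(x)        # expected shape (5, 2), passed through torch.relu if forward applies it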
Example #4
 def backward(ctx, grad_output):
     with tp.no_grad():
         nbin = ctx.nbin
         data_pair = ctx.data_pair
         nbatch, nhist, ndata = data_pair.ishape
         dPdI1 = tp.zeros(ctx.Ishape)
         dPdI2 = tp.zeros(ctx.Ishape)
         for shift in ctx.window:
             # [nbatch] x {nhist} x ndata
             shift = shift.view(1, 2, 1)
             hist_pos = data_pair * nbin
             index = torch.clamp(
                 torch.floor(hist_pos).long() + shift, 0, nbin - 1)
             grad_y = grad_output[(slice(None), ) +
                                  index.split(1, 1)].squeeze(2)
             value = grad_y.gather(
                 0,
                 tp.arange(nbatch).long().unsqueeze(0).unsqueeze(-1).repeat(
                     1, 1, ndata)).view(ctx.Ishape)
             dPdI1 += value * dBspline_WRT_I1(
                 shift, tp.decimal(data_pair * nbin)).view(ctx.Ishape)
             dPdI2 += value * dBspline_WRT_I2(
                 shift, tp.decimal(data_pair * nbin)).view(ctx.Ishape)
     return dPdI1, dPdI2, None
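The backward above follows the usual torch.autograd.Function contract: it returns one gradient per forward input, with None for inputs that need no gradient (the trailing None here). A self-contained minimal sketch of that pattern, unrelated to the B-spline histogram itself:

import torch

class ScaleBy(torch.autograd.Function):
    """y = x * factor, where factor is a plain Python number (no gradient)."""

    @staticmethod
    def forward(ctx, x, factor):
        ctx.factor = factor            # stash non-tensor state on ctx, like nbin/window above
        return x * factor

    @staticmethod
    def backward(ctx, grad_output):
        # One return value per forward input: grad for x, None for factor.
        return grad_output * ctx.factor, None

x = torch.randn(3, requires_grad=True)
y = ScaleBy.apply(x, 2.0).sum()
y.backward()
print(x.grad)                          # tensor of 2.0s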
Example #5
 def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
     self.check_forward_input(input)
     if hx is None:
         hx = torch.zeros(input.size(0),
                          self.hidden_size,
                          dtype=input.dtype,
                          device=input.device)
     self.check_forward_hidden(input, hx, '')
     return _VF.gru_cell(
         input,
         hx,
         self.weight_ih,
         self.weight_hh,
         self.bias_ih,
         self.bias_hh,
     )
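The same pattern through torch.nn.GRUCell: a single tensor hidden state, created as zeros when hx is None.

import torch
import torch.nn as nn

cell = nn.GRUCell(input_size=10, hidden_size=20)
x = torch.randn(3, 10)
h = cell(x)            # zeros hidden state created internally, shape (3, 20)
h = cell(x, h)         # subsequent step with an explicit hidden state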
Example #6
    def forward(self, input, hx=None):  # noqa: F811
        orig_input = input
        # xxx: isinstance check needs to be in conditional for TorchScript to compile
        if isinstance(orig_input, PackedSequence):
            input, batch_sizes, sorted_indices, unsorted_indices = input
            max_batch_size = batch_sizes[0]
            max_batch_size = int(max_batch_size)
        else:
            batch_sizes = None
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None

        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = torch.zeros(self.num_layers * num_directions,
                             max_batch_size,
                             self.hidden_size,
                             dtype=input.dtype,
                             device=input.device)
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        if batch_sizes is None:
            result = _VF.gru(input, hx, self._flat_weights, self.bias,
                             self.num_layers, self.dropout, self.training,
                             self.bidirectional, self.batch_first)
        else:
            result = _VF.gru(input, batch_sizes, hx, self._flat_weights,
                             self.bias, self.num_layers, self.dropout,
                             self.training, self.bidirectional)
        output = result[0]
        hidden = result[1]

        # xxx: isinstance check needs to be in conditional for TorchScript to compile
        if isinstance(orig_input, PackedSequence):
            output_packed = PackedSequence(output, batch_sizes, sorted_indices,
                                           unsorted_indices)
            return output_packed, self.permute_hidden(hidden, unsorted_indices)
        else:
            return output, self.permute_hidden(hidden, unsorted_indices)
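A sketch of driving this forward through torch.nn.GRU, both with a plain padded batch and with a PackedSequence (the two branches at the top of the method):

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

gru = nn.GRU(input_size=10, hidden_size=20, num_layers=2, batch_first=True)

# Plain tensor input: hx defaults to zeros of shape (num_layers * num_directions, batch, hidden).
x = torch.randn(4, 7, 10)                 # (batch, seq, feature)
output, h_n = gru(x)                      # output: (4, 7, 20), h_n: (2, 4, 20)

# PackedSequence input: forward returns a PackedSequence as well.
lengths = torch.tensor([7, 5, 3, 2])      # sorted descending, so enforce_sorted=True is fine
packed = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=True)
packed_out, h_n = gru(packed)
output, out_lengths = pad_packed_sequence(packed_out, batch_first=True)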
Example #7
    def forward(self,
                input: Tensor,
                hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        is_packed = isinstance(input, PackedSequence)
        if is_packed:
            input, batch_sizes, sorted_indices, unsorted_indices = input
            max_batch_size = batch_sizes[0]
            max_batch_size = int(max_batch_size)
        else:
            batch_sizes = None
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None

        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = torch.zeros(self.num_layers * num_directions,
                             max_batch_size,
                             self.hidden_size,
                             dtype=input.dtype,
                             device=input.device)
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        _impl = _rnn_impls[self.mode]
        if batch_sizes is None:
            result = _impl(input, hx, self._flat_weights, self.bias,
                           self.num_layers, self.dropout, self.training,
                           self.bidirectional, self.batch_first)
        else:
            result = _impl(input, batch_sizes, hx, self._flat_weights,
                           self.bias, self.num_layers, self.dropout,
                           self.training, self.bidirectional)
        output = result[0]
        hidden = result[1]

        if is_packed:
            output = PackedSequence(output, batch_sizes, sorted_indices,
                                    unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)
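This is the shared RNNBase.forward used by torch.nn.RNN; in this PyTorch version, _rnn_impls is assumed to map self.mode (e.g. 'RNN_TANH' or 'RNN_RELU') to the corresponding _VF kernel. A minimal call sketch through the public module:

import torch
import torch.nn as nn

rnn = nn.RNN(input_size=10, hidden_size=20, num_layers=1, nonlinearity='relu')
x = torch.randn(7, 3, 10)                 # (seq, batch, feature) since batch_first=False
output, h_n = rnn(x)                      # output: (7, 3, 20), h_n: (1, 3, 20)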
Example #8
 def forward(self,
             input: Tensor,
             hx: Optional[Tuple[Tensor,
                                Tensor]] = None) -> Tuple[Tensor, Tensor]:
     self.check_forward_input(input)
     if hx is None:
         zeros = torch.zeros(input.size(0),
                             self.hidden_size,
                             dtype=input.dtype,
                             device=input.device)
         hx = (zeros, zeros)
     self.check_forward_hidden(input, hx[0], '[0]')
     self.check_forward_hidden(input, hx[1], '[1]')
     return _VF.lstm_cell(
         input,
         hx,
         self.weight_ih,
         self.weight_hh,
         self.bias_ih,
         self.bias_hh,
     )
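torch.nn.LSTMCell returns the (h, c) tuple that this forward produces; when hx is None both states start as zeros.

import torch
import torch.nn as nn

cell = nn.LSTMCell(input_size=10, hidden_size=20)
x = torch.randn(3, 10)
h, c = cell(x)             # both zero-initialized internally, each (3, 20)
h, c = cell(x, (h, c))     # pass the previous (hidden, cell) state explicitly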
Example #9
##############################
## Author: Yuncheng Zhou
##############################

import sys
sys.path.append("../..")

# import torchplus as tp
import torch
import torchplus as tp
print(tp.__file__)
from pyctlib import scope
import copy

print(tp.stack(tp.zeros(3, 4), tp.ones(3, 4), dim={1}))

#tp.set_autodevice(False)
#tp.manual_seed(0)
#with scope("test tp, cpu"):
#    t = tp.randn([3000, 400], requires_grad=True)
#    a = t
#    LP = tp.nn.Linear(400, 400)
#    for _ in range(10): a = LP(a)
#    a.sum().backward()
#
#torch.manual_seed(0)
#with scope("test torch, cpu"):
#    t_ = torch.randn([3000, 400], requires_grad=True)
#    a_ = t_
#    LP_ = torch.nn.Linear(400, 400)
Example #10
    assert t.is_cuda
    assert tp.tensor(np.array([1., 2.])).is_cuda

tp.set_autodevice(False)
tp.manual_seed(0)
with scope("test tp, cpu"):
    t = tp.randn(3000, 400, requires_grad=True)
    a = t
    LP = tp.nn.Linear(400, 400)
    for _ in range(10): a = LP(a).relu()
    a.sum().backward()

torch.manual_seed(0)
with scope("test torch, cpu"):
    t_ = torch.randn(3000, 400).requires_grad_()
    a_ = t_
    LP_ = torch.nn.Linear(400, 400)
    for _ in range(10): a_ = LP_(a_).relu()
    a_.sum().backward()

assert a.is_cuda is False
assert t.allclose(t_)
assert isinstance(t, tp.Tensor)
assert isinstance(a, tp.Tensor)
assert isinstance(LP.weight, tp.nn.Parameter)
assert isinstance(LP.bias, tp.nn.Parameter)
assert isinstance(tp.tensor(np.array([1., 2.])), tp.Tensor)

tp.nn.ParameterList([tp.nn.Parameter(tp.zeros(30)), tp.nn.Parameter(tp.zeros(30))])
tp.nn.ParameterList([LP.weight, LP.bias])
Example #11
import sys
import os
sys.path.append(os.path.abspath("."))
import pyctlib
import pathlib
import numpy as np
import torchplus as tp
from pyctlib import vector, vhelp

tp.zeros(10)
Example #12
#! python3 -u
#  -*- coding: utf-8 -*-

##############################
## Author: Yiteng Zhang
##############################

import sys
# sys.path.append("/Users/zhangyiteng/Software/Python_Lib/new_pyctlib/pyctlib")
# sys.path.append("../..")
import copy
import torch
sys.path = ["../.."] + sys.path


import torchplus as tp
from pyctlib import scope
from pyctlib import vector

##############################
## Test CPU
##############################

with scope("tp, cpu, cat"):
    tp.cat([tp.zeros(300, 300), tp.zeros(300, 300)])

with scope("torch, cpu, cat"):
    torch.cat([torch.zeros(300, 300), torch.zeros(300, 300)])