Example 1
import numpy as np

import nn  # the custom neural-network module exercised by these examples (exact import path may differ)


def main():

    # trivial test: fit targets generated by a random affine map Y = X A^T + bias

    X = np.random.randn(100, 5)
    A = np.random.randn(10, 5)
    bias = np.random.randn(10)

    Y = np.dot(A, X.T).T + bias      # equivalent to X @ A.T + bias, shape (100, 10)
    print(np.dot(A, X.T).T.shape)

    # small MLP trained with mean-squared error
    model = nn.Sequential()
    model.add(nn.Linear(5, 10))
    model.add(nn.Tanh(10))
    model.add(nn.Linear(10, 10))
    model.add(nn.MSE(10))

    print("Batch mode")
    for i in range(10000):
        print("Loss", model.forward(X, Y))
        model.backward(alpha=0.1)

    print("Single mode")
    for i in range(10000):
        for j in range(X.shape[0]):
            print("Loss", model.forward(X[j], Y[j]))
            model.backward(alpha=0.1)


if __name__ == '__main__':
    main()
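
For orientation (not part of the original snippet), here is a plain-NumPy sketch of the same fitting problem: the targets are an affine function of the inputs, so hand-written batch gradient descent on the mean squared error of a single linear map already recovers A and bias. All names below are local to this sketch and assume nothing about the custom nn module.

# Baseline: fit W, b so that X @ W.T + b approximates Y = X @ A.T + bias,
# using plain batch gradient descent on the mean squared error. This mirrors,
# in simplified form, what the training loop above is optimising.
import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 5))
A = rng.standard_normal((10, 5))
bias = rng.standard_normal(10)
Y = X @ A.T + bias

W = np.zeros((10, 5))
b = np.zeros(10)
alpha = 0.1
for _ in range(1000):
    pred = X @ W.T + b                        # (100, 10)
    err = pred - Y
    loss = np.mean(np.sum(err ** 2, axis=1))  # squared error summed over outputs, averaged over samples
    grad_W = 2.0 * err.T @ X / X.shape[0]     # d loss / d W
    grad_b = 2.0 * err.mean(axis=0)           # d loss / d b
    W -= alpha * grad_W
    b -= alpha * grad_b

print("final loss:", loss, "max |W - A|:", np.max(np.abs(W - A)))
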
Example 2
    def test_0(self):
        # sigmoid(0) must be exactly 0.5 and tanh(0) exactly 0;
        # `call` is expected to modify `inputs` in place.
        s = nn.Sigmoid()
        inputs = np.asarray([0], dtype=np.float16)
        s.call(inputs)
        self.assertEqual(inputs[0], 0.5)
        inputs[:] = 0
        t = nn.Tanh()
        t.call(inputs)
        self.assertEqual(inputs[0], 0)
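
The `nn` module under test is not shown in these examples. A minimal sketch of activation classes that would satisfy the assertions above, assuming `call` transforms the array in place as the test implies (an illustration only, not the actual module):

# Minimal sketch of in-place activation classes consistent with the test above.
import numpy as np

class Sigmoid:
    def call(self, x):
        x[:] = 1.0 / (1.0 + np.exp(-x))   # sigmoid(0) == 0.5
        return x

class Tanh:
    def call(self, x):
        x[:] = np.tanh(x)                 # tanh(0) == 0
        return x
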
Example 3
def grad_check_all(inputs, eps, tolerance):
    # verify each layer's analytic gradients at `inputs`; the eps/tolerance
    # arguments suggest a finite-difference comparison (see the sketch below)
    print('gradient check', end='...')
    assert(nn.grad_check(nn.Sigmoid(inputs.shape[1]), inputs, eps, tolerance))
    assert(nn.grad_check(nn.LogSoftMax(inputs.shape[1]), inputs, eps, tolerance))
    assert(nn.grad_check(nn.SoftMax(inputs.shape[1]), inputs, eps, tolerance))
    assert(nn.grad_check(nn.ReLU(inputs.shape[1]), inputs, eps, tolerance))
    assert(nn.grad_check(nn.Tanh(inputs.shape[1]), inputs, eps, tolerance))
    assert(nn.grad_check(nn.Linear(inputs.shape[1], max(1, inputs.shape[1] - 3)), inputs, eps, tolerance))
    assert(nn.grad_check(nn.CrossEntropy(inputs.shape[1]), inputs, eps, tolerance, np.random.rand(*inputs.shape)))
    print('[OK]')
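
`nn.grad_check` is likewise not shown, but the `eps`/`tolerance` parameters point to a standard central-difference check. The following self-contained sketch illustrates the idea; `numeric_grad` is a hypothetical helper, not the actual `nn.grad_check` API:

# Central finite-difference gradient check: perturb each input coordinate by
# +/- eps and compare the numeric slope of a scalar function against the
# analytic gradient.
import numpy as np

def numeric_grad(f, x, eps=1e-5):
    """Central finite differences of a scalar-valued f with respect to x."""
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + eps
        f_plus = f(x)
        x[idx] = orig - eps
        f_minus = f(x)
        x[idx] = orig                      # restore the original value
        g[idx] = (f_plus - f_minus) / (2.0 * eps)
        it.iternext()
    return g

# Example: the analytic derivative of sum(tanh(x)) is 1 - tanh(x)**2.
x = np.random.randn(3, 4)
analytic = 1.0 - np.tanh(x) ** 2
numeric = numeric_grad(lambda v: np.sum(np.tanh(v)), x)
assert np.allclose(analytic, numeric, atol=1e-6)
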
Example 4
  def __init__(self, x_dim, d_dim, z1_dim, pool = 'mean'):
    super(G_inv_Tanh, self).__init__()
    self.d_dim = d_dim
    self.x_dim = x_dim
    self.z1_dim = z1_dim
    self.pool = pool

    if pool == 'max':
        self.phi = nn.Sequential(
          PermEqui2_max(self.x_dim, self.d_dim),
          nn.Tanh(),
          PermEqui2_max(self.d_dim, self.d_dim),
          nn.Tanh(),
          PermEqui2_max(self.d_dim, self.d_dim),
          nn.Tanh(),
        )
    elif pool == 'max1':
        self.phi = nn.Sequential(
          PermEqui1_max(self.x_dim, self.d_dim),
          nn.Tanh(),
          PermEqui1_max(self.d_dim, self.d_dim),
          nn.Tanh(),
          PermEqui1_max(self.d_dim, self.d_dim),
          nn.Tanh(),
        )
    elif pool == 'mean':
        self.phi = nn.Sequential(
          PermEqui2_mean(self.x_dim, self.d_dim),
          nn.Tanh(),
          PermEqui2_mean(self.d_dim, self.d_dim),
          nn.Tanh(),
          PermEqui2_mean(self.d_dim, self.d_dim),
          nn.Tanh(),
        )
    
    self.ro = nn.Sequential(
       nn.Linear(self.d_dim, self.d_dim),
       nn.Tanh(),
       nn.Linear(self.d_dim, self.z1_dim),
    )
    print(self) 
    self.faster_parameters = [p for p in self.parameters()]
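
The `PermEqui*` blocks referenced above are defined elsewhere in the repository. For orientation, a sketch of such a permutation-equivariant layer in the Deep Sets style this code appears to follow (not necessarily the exact definition used here):

# Sketch of a permutation-equivariant block like PermEqui2_mean: each set
# element is transformed by Gamma while the set mean, transformed by Lambda,
# is subtracted. Subtracting a pooled statistic of the whole set keeps the
# layer equivariant to permutations of the set elements.
import torch
import torch.nn as nn

class PermEqui2_mean(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.Gamma = nn.Linear(in_dim, out_dim)
        self.Lambda = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, x):
        # x: (batch, set_size, in_dim)
        xm = x.mean(dim=1, keepdim=True)
        return self.Gamma(x) - self.Lambda(xm)

# Quick shape check: a set of 7 elements with 3 features each, mapped to 5 features.
x = torch.randn(2, 7, 3)
print(PermEqui2_mean(3, 5)(x).shape)   # torch.Size([2, 7, 5])
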
Example 5
    def test_pos(self):
        s = nn.Sigmoid()
        inputs = np.arange(11, dtype=float)   # 0 .. 10
        s.call(inputs)
        n = np.sum(inputs < 0.9)              # only sigmoid(0), sigmoid(1), sigmoid(2) fall below 0.9
        self.assertEqual(n, 3)

        inputs[:] = np.arange(11) - 5         # -5 .. 5, symmetric around zero
        t = nn.Tanh()
        t.call(inputs)
        for i in range(6):
            # tanh is odd, so tanh(i) + tanh(-i) == 0
            self.assertAlmostEqual(inputs[5 + i] + inputs[5 - i], 0)
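
A note on why these assertions hold: sigmoid(2) ≈ 0.881 while sigmoid(3) ≈ 0.953, so exactly three of the inputs 0..10 map below 0.9, and tanh is odd, so inputs symmetric around zero cancel. A quick NumPy check, independent of the custom nn module:

# Quick sanity check of the facts the test relies on, using NumPy directly.
import numpy as np

sig = 1.0 / (1.0 + np.exp(-np.arange(11, dtype=float)))
print(np.sum(sig < 0.9))             # 3: sigmoid(0), sigmoid(1), sigmoid(2)
t = np.tanh(np.arange(11, dtype=float) - 5)
print(np.allclose(t + t[::-1], 0))   # True: tanh(x) + tanh(-x) == 0
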
Example 6
                                    ('conv2_2', nn.Conv2d(64, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu2_2', nn.ReLU()),
                                    ('pool2', nn.MaxPool2d(2,2)),

                                    ('conv3_1', nn.Conv2d(64, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu3_1', nn.ReLU()),
                                    ('weight_norm3_1', nn.utils.weight_norm()), # Look into this later: weight_norm wraps an existing module (e.g. the preceding Conv2d) rather than acting as a standalone layer
                                    ('conv3_2', nn.Conv2d(64, 64, kernel_size=3,stride = 1, padding = 1)),
                                    ('relu3_2', nn.ReLU()),
                                    ('weight_norm3_2', nn.utils.weight_norm()), # Look into this later: weight_norm wraps an existing module (e.g. the preceding Conv2d) rather than acting as a standalone layer
                                    ('pool3', nn.MaxPool2d(2,2)),

                                    ('fc4', nn.Linear(12288, 100)),
                                    ('tanh4', nn.Tanh()),
                                    ('fc5', nn.Linear(100,2)),
                                    ('tanh5', nn.Tanh()),
                                    ('fc6', nn.Linear(2,1)),
                                    # ('sigmoid6', F.Sigmoid())
                                                              ]))
  return F.Sigmoid(net)  # Look into this later: torch.nn.functional has no Sigmoid class; an nn.Sigmoid() layer inside the net is the usual approach

class DiscriminatorNet(nn.Module):
  def __init__(self):
    super(DiscriminatorNet, self).__init__()
    
    self.net = nn.Sequential(OrderedDict([
                                            ('merge', nn.Conv2d(4, 3, kernel_size=1,stride = 1, padding = 0)),
                                            ('conv1', nn.Conv2d(3, 32, kernel_size=3,stride = 1, padding = 1)),
                                            ('relu1', nn.ReLU()),
                                            ('pool1',  nn.MaxPool2d(4,4)),