Example 1
# Assumed imports for the ReNom API used in this test; `close` is a
# comparison helper from the surrounding test suite (see the sketch below).
import renom as rm
from renom.core import Variable
from renom.cuda import set_cuda_active


def test_gpu_node_tanh(a):
    # Forward/backward pass with CUDA enabled.
    set_cuda_active(True)

    g1 = Variable(a)

    g3 = rm.sum(rm.tanh(g1))
    g = g3.grad()
    g_g1 = g.get(g1)
    g3.to_cpu()

    # Repeat the same computation on the CPU.
    set_cuda_active(False)
    c3 = rm.sum(rm.tanh(g1))
    c = c3.grad()
    c_g1 = c.get(g1)

    # The GPU and CPU results (values and gradients) should match.
    close(g3, c3)
    close(c_g1, g_g1)
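The test relies on a `close` helper and an input array `a` that are not defined in the snippet. A minimal sketch of how they might be supplied, assuming `close` wraps `numpy.allclose` and `a` comes from a pytest fixture; the tolerances and the example array shape are assumptions, not taken from the original test:

import numpy as np
import pytest


def close(x, y):
    # Assumed helper: compare two ReNom nodes / numpy arrays element-wise.
    assert np.allclose(np.array(x), np.array(y), rtol=1e-3, atol=1e-5)


@pytest.fixture(params=[np.random.rand(2, 3).astype(np.float32)])
def a(request):
    # Hypothetical fixture supplying the input array for test_gpu_node_tanh.
    return request.param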
Example 2
    def forward(self, x):
        # Normalize, activate with tanh, then convolve; the result is
        # concatenated with the block input (dense-block style).
        hidden = self.batch(x)
        hidden = rm.tanh(hidden)
        hidden = self.conv(hidden)
        if self.dropout:
            hidden = rm.dropout(hidden)
        hidden = rm.concat(x, hidden)
        if self.depth != 1:
            # Hand the concatenated features to the nested sub-model
            # (see the constructor sketch after this example).
            return self.under_model(hidden)
        #print(hidden.shape)
        return self.output(hidden)
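This forward recurses into a nested `under_model` until `depth` reaches 1. A minimal sketch of a constructor that would support it, assuming ReNom's `rm.Model`, `rm.BatchNormalize`, and `rm.Conv2d`; the class name, channel count, and filter sizes are illustrative and not part of the original model:

import renom as rm


class DenseBlock(rm.Model):
    # Hypothetical constructor matching the forward() above.
    def __init__(self, depth, channel=16, dropout=False):
        super(DenseBlock, self).__init__()
        self.depth = depth
        self.dropout = dropout
        self.batch = rm.BatchNormalize()
        self.conv = rm.Conv2d(channel=channel, filter=3, padding=1)
        if depth != 1:
            # The nested sub-model handles the remaining depth.
            self.under_model = DenseBlock(depth - 1, channel, dropout)
        else:
            self.output = rm.Conv2d(channel=1, filter=1)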
Example 3
    def forward(self, x):
        # Same dense-block pattern as above, but unrolled as a loop over a
        # flat layer list; each depth step consumes two layers
        # (see the construction sketch after this example).
        layers = self.hidden._layers
        hidden = self.input(x)
        i = 0
        for _ in range(self.depth):
            main_stream = hidden
            hidden = layers[i](main_stream)
            i += 1
            hidden = rm.tanh(hidden)
            hidden = layers[i](hidden)
            i += 1
            if self.dropout:
                hidden = rm.dropout(hidden)
            # Re-attach the step's input to its output, DenseNet style.
            hidden = rm.concat(main_stream, hidden)
        #print(hidden.shape)
        return self.output(hidden)
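Here the recursion of the previous example is unrolled into a loop over a flat layer list, consuming two layers per depth step. A sketch of how `self.hidden` might be built so that the indexing works, assuming `rm.Sequential`; the depth and the specific layer types are illustrative only:

import renom as rm

depth = 3
layers = []
for _ in range(depth):
    # Two entries per depth step: one applied before rm.tanh and one after it,
    # matching the i / i + 1 indexing in forward().
    layers.append(rm.BatchNormalize())
    layers.append(rm.Conv2d(channel=16, filter=3, padding=1))

hidden = rm.Sequential(layers)  # forward() then walks hidden._layers directly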
Example 4
    def forward(self, x):
        # Project the flat input and reshape it into a
        # (batch, channels, dim, dim) feature map for the convolutional stack.
        h = self.transform(x)
        #print(h.shape)
        h = rm.reshape(h, (len(x), self.channels, self.dim, self.dim))
        #print(h.shape)
        layers = self.hidden._layers
        # With batch normalization enabled the layer list is stored in pairs,
        # so there are half as many steps as list entries.
        length = len(layers) if not self.batch_normal else len(layers) // 2
        for i in range(length):
            if self.batch_normal:
                h = layers[2 * i](h)
                h = rm.relu(layers[2 * i + 1](h))
            else:
                h = rm.relu(layers[i](h))
            #print(h.shape)
        h = self.output(h)
        #return rm.sigmoid(h)
        # tanh bounds the output to [-1, 1].
        return rm.tanh(h)
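The `transform` layer produces a flat vector that is reshaped into a feature map before the convolutional stack, and the final `rm.tanh` bounds the output to [-1, 1]. A small shape example for the reshape step, with illustrative sizes (batch 16, 64 channels, 7x7 grid) that are not taken from the original model:

import numpy as np
import renom as rm

channels, dim = 64, 7
x = np.random.rand(16, 100).astype(np.float32)    # a batch of 16 flat inputs
transform = rm.Dense(channels * dim * dim)         # projects to 64 * 7 * 7 = 3136 features

h = transform(x)                                   # shape (16, 3136)
h = rm.reshape(h, (len(x), channels, dim, dim))    # shape (16, 64, 7, 7)
print(h.shape)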
Example 5
    def forward(self, x):
        # input -> hidden -> tanh -> output, composed in a single expression.
        return self.output(rm.tanh(self.hidden(self.input(x))))
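This is the simplest case: one hidden layer followed by tanh. A minimal usage sketch with assumed `rm.Dense` layers; the class name and layer sizes are illustrative, not from the original code:

import numpy as np
import renom as rm


class TanhMLP(rm.Model):
    # Hypothetical three-layer perceptron matching the forward() above.
    def __init__(self, hidden_size=32, out_size=3):
        super(TanhMLP, self).__init__()
        self.input = rm.Dense(hidden_size)
        self.hidden = rm.Dense(hidden_size)
        self.output = rm.Dense(out_size)

    def forward(self, x):
        return self.output(rm.tanh(self.hidden(self.input(x))))


model = TanhMLP()
y = model(np.random.rand(8, 4).astype(np.float32))  # output shape: (8, 3)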