def __init__(self, hidden_dim=10):
     super(conv_Update, self).__init__()
     self.hidden_dim = hidden_dim
     dtype = torch.cuda.FloatTensor  # assumes a CUDA device is available; no CPU fallback here
     self.update = ConvGRU(input_dim=hidden_dim,
                           hidden_dim=hidden_dim,
                           kernel_size=(1, 1),
                           num_layers=1,
                           dtype=dtype,
                           batch_first=True,
                           bias=True,
                           return_all_layers=False)
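
The ConvGRU class used above is project-local and not shown here. For reference, below is a minimal, self-contained sketch of what a single ConvGRU cell with a (1, 1) kernel computes (standard GRU gating implemented with convolutions); it is an illustration of the idea, not the project's actual ConvGRU implementation.

import torch
import torch.nn as nn

class ConvGRUCellSketch(nn.Module):
    """Single ConvGRU cell with a 1x1 kernel -- an illustrative stand-in for
    the project-local ConvGRU that conv_Update delegates to."""
    def __init__(self, input_dim, hidden_dim):
        super().__init__()
        # reset and update gates, computed jointly from [x, h]
        self.gates = nn.Conv2d(input_dim + hidden_dim, 2 * hidden_dim, kernel_size=1)
        # candidate hidden state, computed from [x, r * h]
        self.cand = nn.Conv2d(input_dim + hidden_dim, hidden_dim, kernel_size=1)

    def forward(self, x, h):
        r, z = torch.sigmoid(self.gates(torch.cat([x, h], dim=1))).chunk(2, dim=1)
        h_tilde = torch.tanh(self.cand(torch.cat([x, r * h], dim=1)))
        return (1 - z) * h + z * h_tilde  # standard GRU interpolation

# usage: update a (B, hidden_dim, H, W) hidden state with a new feature map
cell = ConvGRUCellSketch(input_dim=10, hidden_dim=10)
h = torch.zeros(2, 10, 8, 8)
x = torch.randn(2, 10, 8, 8)
h_next = cell(x, h)  # shape (2, 10, 8, 8)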
Example #2
 def __init__(self, hidden_dim=10, paths_len=3):
     super(conv_Update, self).__init__()
     self.hidden_dim = hidden_dim
     # detect if CUDA is available or not
     use_gpu = torch.cuda.is_available()
     if use_gpu:
         dtype = torch.cuda.FloatTensor  # computation on the GPU
     else:
         dtype = torch.FloatTensor
     self.conv_update = ConvGRU(input_dim=paths_len * hidden_dim,
                                hidden_dim=hidden_dim,
                                kernel_size=(1, 1),
                                num_layers=1,
                                dtype=dtype,
                                batch_first=True,
                                bias=True,
                                return_all_layers=False)
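
Examples #2 and #3 set input_dim to paths_len * hidden_dim, which suggests the per-path feature maps are concatenated along the channel axis before the update. A small sketch of that shape bookkeeping follows (the variable names are hypothetical; only the resulting channel count follows from the constructor above):

import torch

# hypothetical input preparation for the paths_len * hidden_dim ConvGRU above
paths_len, hidden_dim, B, H, W = 3, 10, 2, 8, 8
path_features = [torch.randn(B, hidden_dim, H, W) for _ in range(paths_len)]

x = torch.cat(path_features, dim=1)  # (B, paths_len * hidden_dim, H, W)
x = x.unsqueeze(1)                   # (B, 1, C, H, W), since batch_first=True
print(x.shape)                       # torch.Size([2, 1, 30, 8, 8])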
Example #3
 def __init__(self, hidden_dim=10, paths_len=3):
     super(conv_Update, self).__init__()
     self.hidden_dim = hidden_dim
     # self.conv_update = nn.Sequential(
     #     nn.Conv2d((paths_len+1) * hidden_dim, 2 * hidden_dim, kernel_size=3, padding=1, stride=1, bias=False),
     #     BatchNorm2d(2 * hidden_dim), nn.ReLU(inplace=False),
     #     nn.Conv2d(2 * hidden_dim, hidden_dim, kernel_size=3, padding=1, stride=1, bias=False),
     #     BatchNorm2d(hidden_dim), nn.ReLU(inplace=False)
     # )
     # detect if CUDA is available or not
     use_gpu = torch.cuda.is_available()
     if use_gpu:
         dtype = torch.cuda.FloatTensor  # computation on the GPU
     else:
         dtype = torch.FloatTensor
     self.conv_update = ConvGRU(input_dim=paths_len * hidden_dim,
                                hidden_dim=hidden_dim,
                                kernel_size=(1, 1),
                                num_layers=1,
                                dtype=dtype,
                                batch_first=True,
                                bias=True,
                                return_all_layers=False)
Example #4
 def __init__(self, in_dim, hidden_dim=10, inputs_num=1):
     super(conv_Update, self).__init__()
     self.hidden_dim = hidden_dim
     # this ConvGRU variant is configured by the raw input dim, hidden dim, and number of inputs
     self.update = ConvGRU(input_dim=in_dim,
                           hidden_dim=hidden_dim,
                           inputs_num=inputs_num)