Example #1
0
def _concatenate(input, task):
  """Pair each batch element's flattened input with the (shared) flattened
  task representation along a new second dimension.

  The task tensor is expanded with ``batchexpand`` so it matches the batch
  size of ``input``; the result stacks both as two "channels":
  presumably shape (batch, 2, features) — depends on what ``flatten``
  returns, which is defined elsewhere.
  """
  inp = flatten(input, batch=True)
  tsk = batchexpand(flatten(task, batch=False), inp)
  # Insert a singleton dim on each, then stack the pair along dim 1.
  return torch.cat([inp.unsqueeze(1), tsk.unsqueeze(1)], dim=1)
Example #2
0
 def compute(self, input, task):
   """Fuse an input batch with a task representation via a bilinear layer.

   Both arguments are flattened (the task without a batch dimension), the
   task is broadcast to the input's batch size with ``batchexpand``, and
   the pair is combined by ``self.bilinear``. If a batch-norm module was
   configured, it is applied to the bilinear output.

   Returns the (optionally normalized) bilinear combination.
   """
   flattened_input = flatten(input, batch=True)
   flattened_task = flatten(task, batch=False)
   flattened_task = batchexpand(flattened_task, flattened_input)
   result = self.bilinear(flattened_input, flattened_task)
   # Fix: use identity comparison for None (PEP 8) instead of `!= None`,
   # which invokes __ne__ and can misbehave on objects overriding it.
   if self.batch_norm is not None:
     result = self.batch_norm(result)
   return result
Example #3
0
 def forward(self, input, task):
   """Score an input against a task.

   The input/task pair is stacked by ``_concatenate``, passed through the
   fully-connected layer ``self.connected``, dropout-regularized,
   optionally batch-normalized, and finally scored by ``self.evaluator``.
   """
   concatenated = _concatenate(input, task)
   combined = self.connected(concatenated)
   # NOTE(review): `training=True` forces dropout even in eval mode. If
   # this is not intentional (e.g. MC dropout), it should be
   # `training=self.training` — confirm with the model's intent.
   combined = func.dropout(combined, training=True)
   # Fix: identity comparison for None (PEP 8) instead of `!= None`.
   if self.batch_norm is not None:
     # Batch norm expects a flat (batch, features) tensor; restore the
     # singleton dim afterwards so `self.evaluator` sees the same shape.
     combined = self.batch_norm(flatten(combined, batch=True)).unsqueeze(1)
   result = self.evaluator(combined)
   return result
Example #4
0
    def forward(self, input, task=None):
        """Compute per-subtask sigmoid scores for a batch of inputs.

        If ``task`` is given and the module is not frozen, the cached task
        representation is refreshed via ``self.task_embedding``; otherwise
        the previously stored ``self.task_representation`` is reused.

        Returns a tensor of shape ``(batch, num_subtasks)`` on the input's
        device, with each column the sigmoid of ``self.metric`` applied to
        the input representation and one subtask row.
        """
        # Fix: identity comparison for None (PEP 8) instead of `!= None`.
        if task is not None and not self.frozen:
            self.task_representation = self.task_embedding(task)
        input_representation = self.embedding(input)

        # Allocate directly on the target device instead of creating on CPU
        # and moving (avoids an extra copy).
        result = torch.zeros(
            (input_representation.size(0), self.task_representation.size(0)),
            device=input.device,
        )

        for idx in range(self.task_representation.size(0)):
            subtask = self.task_representation[idx, :]
            subresult = flatten(self.metric(input_representation, subtask))
            # Fix: `func.sigmoid` is deprecated; `torch.sigmoid` is the
            # supported equivalent.
            result[:, idx] = torch.sigmoid(subresult)

        return result