Example 1
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check inputs ->
     if not spaces.is_determined(self._in_features):
         expected_input_dim = self.abstract_child["_in_features"].value
     else:
         expected_input_dim = spaces.get_determined_value(self._in_features)
     if input.size(-1) != expected_input_dim:
         raise ValueError(
             "Expect the input dim of {:} instead of {:}".format(
                 expected_input_dim, input.size(-1)))
     # create the weight matrix
     if not spaces.is_determined(self._out_features):
         out_dim = self.abstract_child["_out_features"].value
     else:
         out_dim = spaces.get_determined_value(self._out_features)
     candidate_weight = self._super_weight[:out_dim, :expected_input_dim]
     # create the bias matrix
     if not spaces.is_determined(self._bias):
         if self.abstract_child["_bias"].value:
             candidate_bias = self._super_bias[:out_dim]
         else:
             candidate_bias = None
     else:
         if spaces.get_determined_value(self._bias):
             candidate_bias = self._super_bias[:out_dim]
         else:
             candidate_bias = None
     return F.linear(input, candidate_weight, candidate_bias)
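This slicing pattern is the core weight-sharing trick: one super weight is allocated at the largest candidate size, and every sub-network reuses a prefix block of it. A minimal standalone sketch of that idea, with made-up dimensions rather than the library's classes:

import torch
import torch.nn.functional as F

# Shared "super" parameters sized for the largest candidate (hypothetical dims).
max_out, max_in = 32, 16
super_weight = torch.randn(max_out, max_in)
super_bias = torch.randn(max_out)

# A smaller candidate reuses the top-left block of the shared parameters,
# mirroring the slicing in forward_candidate above.
out_dim, in_dim = 8, 4
x = torch.randn(2, in_dim)
y = F.linear(x, super_weight[:out_dim, :in_dim], super_bias[:out_dim])
print(y.shape)  # torch.Size([2, 8])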
Example 2
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check inputs ->
     if not spaces.is_determined(self._in_features):
         expected_input_dim = self.abstract_child["_in_features"].value
     else:
         expected_input_dim = spaces.get_determined_value(self._in_features)
     if input.size(-1) != expected_input_dim:
         raise ValueError(
             "Expect the input dim of {:} instead of {:}".format(
                 expected_input_dim, input.size(-1)))
    # create the weight and bias matrix for fc1
    if not spaces.is_determined(self._hidden_multiplier):
        hmul = self.abstract_child["_hidden_multiplier"].value
    else:
        hmul = spaces.get_determined_value(self._hidden_multiplier)
    hidden_dim = int(expected_input_dim * hmul)
     _fc1_weight = self.fc1_super_weight[:hidden_dim, :expected_input_dim]
     _fc1_bias = self.fc1_super_bias[:hidden_dim]
     x = F.linear(input, _fc1_weight, _fc1_bias)
     x = self.act(x)
     x = self.drop(x)
     # create the weight and bias matrix for fc2
     if not spaces.is_determined(self._out_features):
         out_dim = self.abstract_child["_out_features"].value
     else:
         out_dim = spaces.get_determined_value(self._out_features)
     _fc2_weight = self.fc2_super_weight[:out_dim, :hidden_dim]
     _fc2_bias = self.fc2_super_bias[:out_dim]
     x = F.linear(x, _fc2_weight, _fc2_bias)
     x = self.drop(x)
     return x
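To see the two chained slices concretely, here is a standalone sketch with made-up sizes (assuming the multiplier semantics above, with ReLU standing in for self.act and dropout omitted):

import torch
import torch.nn.functional as F

# Hypothetical sizes: hidden_dim comes from the multiplier, then both
# linear layers slice prefixes of their shared super weights.
in_dim, hmul, out_dim = 8, 2.0, 6
hidden_dim = int(in_dim * hmul)  # 16

fc1_w = torch.randn(32, 16)[:hidden_dim, :in_dim]   # slice of fc1 super weight
fc1_b = torch.randn(32)[:hidden_dim]
fc2_w = torch.randn(16, 32)[:out_dim, :hidden_dim]  # slice of fc2 super weight
fc2_b = torch.randn(16)[:out_dim]

x = torch.randn(4, in_dim)
x = torch.relu(F.linear(x, fc1_w, fc1_b))
x = F.linear(x, fc2_w, fc2_b)
print(x.shape)  # torch.Size([4, 6])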
Example 3
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     x = input.reshape(len(input), self._d_feat, -1)  # [N, F*T] -> [N, F, T]
     x = x.permute(0, 2, 1)  # [N, F, T] -> [N, T, F]
     if not spaces.is_determined(self._embed_dim):
         embed_dim = self.abstract_child["_embed_dim"].value
     else:
         embed_dim = spaces.get_determined_value(self._embed_dim)
     out = self.proj(x) * math.sqrt(embed_dim)
     return out
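The multiplication by math.sqrt(embed_dim) follows the usual Transformer convention of scaling token embeddings before adding positional information. A small sketch with a hypothetical projection layer:

import math
import torch
import torch.nn as nn

# Hypothetical projection from 6 raw features to a 64-dim embedding.
embed_dim = 64
proj = nn.Linear(6, embed_dim)
x = torch.randn(2, 60, 6)             # [N, T, F]
out = proj(x) * math.sqrt(embed_dim)  # scaled as in the method above
print(out.shape)                      # torch.Size([2, 60, 64])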
Example 4
 def forward_candidate(
     self, q_tensor, k_tensor, v_tensor, mask=None
 ) -> torch.Tensor:
     # check the num_heads:
     if not spaces.is_determined(self._num_heads):
         num_heads = self.abstract_child["_num_heads"].value
     else:
         num_heads = spaces.get_determined_value(self._num_heads)
     feats = self.forward_qkv(q_tensor, k_tensor, v_tensor, num_heads, mask)
     outs = self.proj(feats)
     outs = self.proj_drop(outs)
     return outs
Example 5
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check the num_heads:
     if not spaces.is_determined(self._num_heads):
         num_heads = self.abstract_child["_num_heads"].value
     else:
         num_heads = spaces.get_determined_value(self._num_heads)
     feats = self.forward_qkv(input, num_heads)
     if self.proj is None:
         return feats
     else:
         outs = self.proj(feats)
         outs = self.proj_drop(outs)
         return outs
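In both attention examples, forward_qkv is defined elsewhere in the class and receives num_heads as an argument. The head split it performs typically looks like this sketch (illustrative, not the library's exact code):

import torch

# Illustrative multi-head split: a feature tensor [N, T, C] is viewed as
# num_heads chunks of size C // num_heads, one per attention head.
N, T, C, num_heads = 2, 10, 64, 4
feats = torch.randn(N, T, C)
heads = feats.reshape(N, T, num_heads, C // num_heads).permute(0, 2, 1, 3)
print(heads.shape)  # torch.Size([2, 4, 10, 16])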
Example 6
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
    batch, seq, fdim = input.shape[:3]
    embeddings = self.pe[:, :seq]
    if not spaces.is_determined(self._d_model):
        expected_d_model = self.abstract_child["_d_model"].value
    else:
        expected_d_model = spaces.get_determined_value(self._d_model)
    assert fdim == expected_d_model, "{:} vs {:}".format(fdim, expected_d_model)
    embeddings = torch.nn.functional.interpolate(
        embeddings, size=expected_d_model, mode="linear", align_corners=True
    )
    outs = self.dropout(input + embeddings)
    return outs
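The interpolate call works because, for 3-D input, mode="linear" resizes the last axis, so a positional table stored at the maximum width can be shrunk to any candidate d_model. A standalone sketch with a hypothetical table:

import torch
import torch.nn.functional as F

# A [1, seq, d_max] positional table resized to a smaller candidate width.
pe = torch.randn(1, 10, 128)  # hypothetical max-size table
resized = F.interpolate(pe, size=96, mode="linear", align_corners=True)
print(resized.shape)          # torch.Size([1, 10, 96])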
Example 7
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check inputs ->
     if not spaces.is_determined(self._in_dim):
         expected_input_dim = self.abstract_child["_in_dim"].value
     else:
         expected_input_dim = spaces.get_determined_value(self._in_dim)
     if input.size(-1) != expected_input_dim:
         raise ValueError(
             "Expect the input dim of {:} instead of {:}".format(
                 expected_input_dim, input.size(-1)))
     if self._elementwise_affine:
         weight = self.weight[:expected_input_dim]
         bias = self.bias[:expected_input_dim]
     else:
         weight, bias = None, None
     return F.layer_norm(input, (expected_input_dim, ), weight, bias,
                         self.eps)
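The same prefix-slicing idea applies to the affine parameters of layer normalization: weight and bias are stored at the maximum width and cut down per candidate. A minimal sketch with made-up widths:

import torch
import torch.nn.functional as F

# Normalize over a candidate-width prefix of the shared affine parameters.
max_dim, cur_dim = 64, 48
weight, bias = torch.ones(max_dim), torch.zeros(max_dim)
x = torch.randn(2, 10, cur_dim)
y = F.layer_norm(x, (cur_dim,), weight[:cur_dim], bias[:cur_dim], 1e-6)
print(y.shape)  # torch.Size([2, 10, 48])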
Example 8
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     batch, flatten_size = input.shape
     feats = self.input_embed(input)  # batch * 60 * 64
     if not spaces.is_determined(self._embed_dim):
         embed_dim = self.abstract_child["_embed_dim"].value
     else:
         embed_dim = spaces.get_determined_value(self._embed_dim)
     cls_tokens = self.cls_token.expand(batch, -1, -1)
      cls_tokens = F.interpolate(
          cls_tokens, size=embed_dim, mode="linear", align_corners=True
      )
     feats_w_ct = torch.cat((cls_tokens, feats), dim=1)
     feats_w_tp = self.pos_embed(feats_w_ct)
     xfeats = self.backbone(feats_w_tp)
     xfeats = xfeats[:, 0, :]  # use the feature for the first token
     predicts = self.head(xfeats).squeeze(-1)
     return predicts
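The final readout keeps only the first (CLS) token's feature and feeds it to the prediction head. A small sketch of that pattern with made-up sizes:

import torch

# CLS-token readout: prepend one learned token per sample and keep only its
# output feature for the head (sizes here are hypothetical).
batch, seq, dim = 2, 60, 64
cls_tokens = torch.zeros(batch, 1, dim)
feats = torch.randn(batch, seq, dim)
feats_w_ct = torch.cat((cls_tokens, feats), dim=1)  # [2, 61, 64]
first_token = feats_w_ct[:, 0, :]                   # what the head consumes
print(first_token.shape)                            # torch.Size([2, 64])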