def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check inputs ->
     if not spaces.is_determined(self._in_features):
         expected_input_dim = self.abstract_child["_in_features"].value
     else:
         expected_input_dim = spaces.get_determined_value(self._in_features)
     if input.size(-1) != expected_input_dim:
         raise ValueError(
             "Expect the input dim of {:} instead of {:}".format(
                 expected_input_dim, input.size(-1)))
     # create the weight matrix
     if not spaces.is_determined(self._out_features):
         out_dim = self.abstract_child["_out_features"].value
     else:
         out_dim = spaces.get_determined_value(self._out_features)
     candidate_weight = self._super_weight[:out_dim, :expected_input_dim]
     # create the bias matrix
     if not spaces.is_determined(self._bias):
         if self.abstract_child["_bias"].value:
             candidate_bias = self._super_bias[:out_dim]
         else:
             candidate_bias = None
     else:
         if spaces.get_determined_value(self._bias):
             candidate_bias = self._super_bias[:out_dim]
         else:
             candidate_bias = None
     return F.linear(input, candidate_weight, candidate_bias)
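
# A minimal, self-contained sketch (plain PyTorch, not the xautodl API) of the
# slicing trick above: one over-sized "super" weight is shared by every
# candidate, and a candidate linear layer is just its top-left sub-matrix.
# All sizes below are made up for illustration.
import torch
import torch.nn.functional as F

super_weight = torch.randn(64, 32)  # [max_out, max_in]
super_bias = torch.randn(64)

x = torch.randn(4, 24)              # a batch whose input dim is 24
out_dim, in_dim = 48, 24            # one sampled candidate shape
y = F.linear(x, super_weight[:out_dim, :in_dim], super_bias[:out_dim])
assert y.shape == (4, 48)
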
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check inputs ->
     if not spaces.is_determined(self._in_features):
         expected_input_dim = self.abstract_child["_in_features"].value
     else:
         expected_input_dim = spaces.get_determined_value(self._in_features)
     if input.size(-1) != expected_input_dim:
         raise ValueError(
             "Expect the input dim of {:} instead of {:}".format(
                 expected_input_dim, input.size(-1)))
     # create the weight and bias matrix for fc1
     if not spaces.is_determined(self._hidden_multiplier):
          hmul = self.abstract_child["_hidden_multiplier"].value
     else:
         hmul = spaces.get_determined_value(self._hidden_multiplier)
     hidden_dim = int(expected_input_dim * hmul)
     _fc1_weight = self.fc1_super_weight[:hidden_dim, :expected_input_dim]
     _fc1_bias = self.fc1_super_bias[:hidden_dim]
     x = F.linear(input, _fc1_weight, _fc1_bias)
     x = self.act(x)
     x = self.drop(x)
     # create the weight and bias matrix for fc2
     if not spaces.is_determined(self._out_features):
         out_dim = self.abstract_child["_out_features"].value
     else:
         out_dim = spaces.get_determined_value(self._out_features)
     _fc2_weight = self.fc2_super_weight[:out_dim, :hidden_dim]
     _fc2_bias = self.fc2_super_bias[:out_dim]
     x = F.linear(x, _fc2_weight, _fc2_bias)
     x = self.drop(x)
     return x
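
# Hedged sketch of the same idea for the two-layer MLP above: fc1 maps
# in -> hidden with hidden_dim = int(in_dim * multiplier), fc2 maps
# hidden -> out, and both are carved out of over-sized super weights
# (plain PyTorch, illustrative shapes only).
import torch
import torch.nn.functional as F

in_dim, multiplier, out_dim = 24, 2.0, 16
hidden_dim = int(in_dim * multiplier)  # 48

fc1_w, fc1_b = torch.randn(128, 64), torch.randn(128)
fc2_w, fc2_b = torch.randn(64, 128), torch.randn(64)

x = torch.randn(4, in_dim)
h = F.relu(F.linear(x, fc1_w[:hidden_dim, :in_dim], fc1_b[:hidden_dim]))
y = F.linear(h, fc2_w[:out_dim, :hidden_dim], fc2_b[:out_dim])
assert y.shape == (4, out_dim)
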
Example 3
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     space = self.proj.abstract_search_space
     if not spaces.is_determined(space):
         root_node.append("proj", space)
     if not spaces.is_determined(self._embed_dim):
         root_node.append("_embed_dim", self._embed_dim.abstract(reuse_last=True))
     return root_node
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     space_fc1 = self.fc1.abstract_search_space
     space_fc2 = self.fc2.abstract_search_space
     if not spaces.is_determined(space_fc1):
         root_node.append("fc1", space_fc1)
     if not spaces.is_determined(space_fc2):
         root_node.append("fc2", space_fc2)
     return root_node
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     if not spaces.is_determined(self._in_features):
         root_node.append("_in_features",
                          self._in_features.abstract(reuse_last=True))
     if not spaces.is_determined(self._out_features):
         root_node.append("_out_features",
                          self._out_features.abstract(reuse_last=True))
     if not spaces.is_determined(self._bias):
         root_node.append("_bias", self._bias.abstract(reuse_last=True))
     return root_node
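
# Hedged sketch of how such a per-attribute search space is typically built,
# using only the spaces API already visible in these snippets (Categorical,
# is_determined, VirtualNode.append, .abstract(reuse_last=True)); the import
# path is an assumption.
from xautodl import spaces

in_features = spaces.Categorical(16, 32, 64)  # searchable
out_features = spaces.Categorical(10)         # a single choice -> determined

node = spaces.VirtualNode(id(in_features))
if not spaces.is_determined(in_features):
    node.append("_in_features", in_features.abstract(reuse_last=True))
if not spaces.is_determined(out_features):    # skipped: already determined
    node.append("_out_features", out_features.abstract(reuse_last=True))
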
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     if not spaces.is_determined(self._embed_dim):
         root_node.append("_embed_dim",
                          self._embed_dim.abstract(reuse_last=True))
     xdict = dict(
         input_embed=self.input_embed.abstract_search_space,
         pos_embed=self.pos_embed.abstract_search_space,
         backbone=self.backbone.abstract_search_space,
         head=self.head.abstract_search_space,
     )
     for key, space in xdict.items():
         if not spaces.is_determined(space):
             root_node.append(key, space)
     return root_node
Example 7
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     space_q = self.q_fc.abstract_search_space
     space_k = self.k_fc.abstract_search_space
     space_v = self.v_fc.abstract_search_space
     space_proj = self.proj.abstract_search_space
     if not spaces.is_determined(self._num_heads):
         root_node.append("_num_heads", self._num_heads.abstract(reuse_last=True))
     if not spaces.is_determined(space_q):
         root_node.append("q_fc", space_q)
     if not spaces.is_determined(space_k):
         root_node.append("k_fc", space_k)
     if not spaces.is_determined(space_v):
         root_node.append("v_fc", space_v)
     if not spaces.is_determined(space_proj):
         root_node.append("proj", space_proj)
     return root_node
Example 8
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     x = input.reshape(len(input), self._d_feat, -1)  # [N, F*T] -> [N, F, T]
     x = x.permute(0, 2, 1)  # [N, F, T] -> [N, T, F]
     if not spaces.is_determined(self._embed_dim):
         embed_dim = self.abstract_child["_embed_dim"].value
     else:
         embed_dim = spaces.get_determined_value(self._embed_dim)
     out = self.proj(x) * math.sqrt(embed_dim)
     return out
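
# Standalone illustration of the reshape/permute at the top of the method: a
# flat [N, F*T] tensor becomes a [N, T, F] sequence (F=6 and T=10 are made-up
# sizes standing in for self._d_feat and the series length).
import torch

x = torch.randn(4, 6 * 10)    # [N, F*T]
x = x.reshape(len(x), 6, -1)  # [N, F, T]
x = x.permute(0, 2, 1)        # [N, T, F]
assert x.shape == (4, 10, 6)
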
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     for index, module in enumerate(self):
         if not isinstance(module, SuperModule):
             continue
         space = module.abstract_search_space
         if not spaces.is_determined(space):
             root_node.append(str(index), space)
     return root_node
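
# The method above assumes the container is iterable the way nn.Sequential is;
# a quick plain-PyTorch illustration of that enumerate-over-self pattern:
import torch.nn as nn

seq = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 4))
for index, module in enumerate(seq):
    print(index, type(module).__name__)  # 0 Linear, 1 ReLU, 2 Linear
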
Example 10
 def forward_candidate(
     self, q_tensor, k_tensor, v_tensor, mask=None
 ) -> torch.Tensor:
     # check the num_heads:
     if not spaces.is_determined(self._num_heads):
         num_heads = self.abstract_child["_num_heads"].value
     else:
         num_heads = spaces.get_determined_value(self._num_heads)
     feats = self.forward_qkv(q_tensor, k_tensor, v_tensor, num_heads, mask)
     outs = self.proj(feats)
     outs = self.proj_drop(outs)
     return outs
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     xdict = dict(
         mha=self.mha.abstract_search_space,
         norm1=self.norm1.abstract_search_space,
         mlp=self.mlp.abstract_search_space,
         norm2=self.norm2.abstract_search_space,
     )
     for key, space in xdict.items():
         if not spaces.is_determined(space):
             root_node.append(key, space)
     return root_node
Example 12
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check the num_heads:
     if not spaces.is_determined(self._num_heads):
         num_heads = self.abstract_child["_num_heads"].value
     else:
         num_heads = spaces.get_determined_value(self._num_heads)
     feats = self.forward_qkv(input, num_heads)
     if self.proj is None:
         return feats
     else:
         outs = self.proj(feats)
         outs = self.proj_drop(outs)
         return outs
Example 13
    def test_determined_and_has(self):
        # Test Non-nested Space
        space = Categorical(1, 2, 3, 4)
        self.assertFalse(space.determined)
        self.assertTrue(space.has(2))
        self.assertFalse(space.has(6))
        space = Categorical(4)
        self.assertTrue(space.determined)

        space = Continuous(0.11, 0.12)
        self.assertTrue(space.has(0.115))
        self.assertFalse(space.has(0.1))
        self.assertFalse(space.determined)
        space = Continuous(0.11, 0.11)
        self.assertTrue(space.determined)

        # Test Nested Space
        space_1 = Categorical(1, 2, 3, 4)
        space_2 = Categorical(1)
        nested_space = Categorical(space_1)
        self.assertFalse(nested_space.determined)
        self.assertTrue(nested_space.has(4))
        nested_space = Categorical(space_2)
        self.assertTrue(nested_space.determined)

        # Test Nested Space 2
        nested_space = Categorical(
            Categorical(1, 2, 3),
            Categorical(4, Categorical(5, 6, 7, Categorical(8, 9), 10), 11),
            12,
        )
        print("\nThe nested search space:\n{:}".format(nested_space))
        for i in range(1, 13):
            self.assertTrue(nested_space.has(i))

        # Test Simple Op
        self.assertTrue(is_determined(1))
        self.assertFalse(is_determined(nested_space))
    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        batch, seq, fdim = input.shape[:3]
        embeddings = self.pe[:, :seq]
        if not spaces.is_determined(self._d_model):
            expected_d_model = self.abstract_child["_d_model"].value
        else:
            expected_d_model = spaces.get_determined_value(self._d_model)
        assert fdim == expected_d_model, "{:} vs {:}".format(
            fdim, expected_d_model)

        embeddings = torch.nn.functional.interpolate(
            embeddings, size=expected_d_model, mode="linear", align_corners=True)
        outs = self.dropout(input + embeddings)
        return outs
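
# Self-contained check of the interpolation trick above: linearly resize the
# last (feature) dimension of a [1, seq, d] positional table to a sampled
# d_model (sizes are made up).
import torch
import torch.nn.functional as F

pe = torch.randn(1, 100, 64)  # [1, max_seq, max_d_model]
emb = F.interpolate(pe[:, :7], size=48, mode="linear", align_corners=True)
assert emb.shape == (1, 7, 48)
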
Example 15
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     # check inputs ->
     if not spaces.is_determined(self._in_dim):
         expected_input_dim = self.abstract_child["_in_dim"].value
     else:
         expected_input_dim = spaces.get_determined_value(self._in_dim)
     if input.size(-1) != expected_input_dim:
         raise ValueError(
             "Expect the input dim of {:} instead of {:}".format(
                 expected_input_dim, input.size(-1)))
     if self._elementwise_affine:
         weight = self.weight[:expected_input_dim]
         bias = self.bias[:expected_input_dim]
     else:
         weight, bias = None, None
     return F.layer_norm(input, (expected_input_dim, ), weight, bias,
                         self.eps)
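
# Minimal plain-PyTorch sketch of the sliced layer norm above: the normalized
# shape and the affine parameters are both cut down to the candidate width.
import torch
import torch.nn.functional as F

max_dim, cur_dim, eps = 64, 48, 1e-6
weight, bias = torch.ones(max_dim), torch.zeros(max_dim)
x = torch.randn(4, cur_dim)
y = F.layer_norm(x, (cur_dim,), weight[:cur_dim], bias[:cur_dim], eps)
assert y.shape == x.shape
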
Example 16
 def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
     batch, flatten_size = input.shape
      feats = self.input_embed(input)  # [batch, 60, 64]
     if not spaces.is_determined(self._embed_dim):
         embed_dim = self.abstract_child["_embed_dim"].value
     else:
         embed_dim = spaces.get_determined_value(self._embed_dim)
     cls_tokens = self.cls_token.expand(batch, -1, -1)
      cls_tokens = F.interpolate(
          cls_tokens, size=embed_dim, mode="linear", align_corners=True)
     feats_w_ct = torch.cat((cls_tokens, feats), dim=1)
     feats_w_tp = self.pos_embed(feats_w_ct)
     xfeats = self.backbone(feats_w_tp)
      xfeats = xfeats[:, 0, :]  # use the feature of the first (CLS) token
     predicts = self.head(xfeats).squeeze(-1)
     return predicts
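
# Hedged sketch of the CLS-token handling above: the stored token has the
# maximal embed dim, so it is linearly resized to the sampled embed_dim and
# prepended along the sequence axis (all sizes illustrative).
import torch
import torch.nn.functional as F

cls_token = torch.zeros(1, 1, 64)  # [1, 1, max_embed_dim]
feats = torch.randn(4, 60, 48)     # [batch, seq, sampled embed_dim]
cls = cls_token.expand(4, -1, -1)  # [4, 1, 64]
cls = F.interpolate(cls, size=48, mode="linear", align_corners=True)
tokens = torch.cat((cls, feats), dim=1)
assert tokens.shape == (4, 61, 48)
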
 def abstract_search_space(self):
     root_node = spaces.VirtualNode(id(self))
     if not spaces.is_determined(self._d_model):
         root_node.append("_d_model",
                          self._d_model.abstract(reuse_last=True))
     return root_node