# Common imports inferred for the test snippets below.
import numpy as np

import megengine as mge
import megengine.functional as F
import megengine.random as R
from megengine import tensor


def test_dropout_dynamic_same_result():
    x = mge.ones(10)
    # Reseeding the RNG before each call reproduces the same random mask,
    # so the two outputs must be identical.
    R.manual_seed(0)
    a = F.dropout(x, 0.5)
    R.manual_seed(0)
    b = F.dropout(x, 0.5)
    assert np.all(a.numpy() == b.numpy())
def forward(self, x):
    new_features = self.feature(x)
    if self.drop_rate > 0:
        # `rescale` mirrors the training flag: kept activations are rescaled
        # only during training (older MegEngine dropout signature).
        new_features = F.dropout(new_features, drop_prob=self.drop_rate, rescale=self.training)
    # Dense connectivity: concatenate input and new features along channels.
    return F.concat([x, new_features], 1)
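# A minimal sketch (not from the source) of what `self.feature` above is
# typically built from: the standard DenseNet bottleneck, BN-ReLU-1x1 conv
# followed by BN-ReLU-3x3 conv. The builder name, `growth_rate`, and
# `bn_size` are assumptions, written against megengine.module.
import megengine.module as M


def make_dense_feature(in_channels, growth_rate, bn_size=4):
    return M.Sequential(
        M.BatchNorm2d(in_channels),
        M.ReLU(),
        M.Conv2d(in_channels, bn_size * growth_rate, 1, bias=False),
        M.BatchNorm2d(bn_size * growth_rate),
        M.ReLU(),
        M.Conv2d(bn_size * growth_rate, growth_rate, 3, padding=1, bias=False),
    )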
def forward(self, x):
    # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
    x = self.avgpool(x)
    # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
    x = self.conv(x)
    # N x 128 x 4 x 4
    x = x.reshape(x.shape[0], -1)
    # N x 2048
    x = F.relu(self.fc1(x))
    # N x 1024
    x = F.dropout(x, 0.7)
    # N x 1024
    x = self.fc2(x)
    # N x num_classes
    return x
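# A minimal sketch (not from the source) of the auxiliary-classifier module
# the forward above implies, following the GoogLeNet aux head. The class
# name and the 5x5/stride-3 pooling are assumptions; 128 * 4 * 4 = 2048
# matches the shape comments above.
import megengine.module as M


class InceptionAuxSketch(M.Module):
    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.avgpool = M.AvgPool2d(kernel_size=5, stride=3)  # 14x14 -> 4x4
        self.conv = M.Conv2d(in_channels, 128, kernel_size=1)
        self.fc1 = M.Linear(128 * 4 * 4, 1024)
        self.fc2 = M.Linear(1024, num_classes)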
def test_dropout():
    data = tensor(np.ones(10, dtype=np.float32))
    # With training=False dropout is the identity, so the output stays all ones.
    out = F.dropout(data, 1.0 / 3.0, training=False)
    assert out.numpy().sum() >= 0.0
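# A companion check (not from the source), assuming MegEngine implements
# inverted dropout: in training mode every surviving entry of an all-ones
# input should equal 1 / (1 - drop_prob) = 1.5 for drop_prob = 1/3.
def test_dropout_rescale_sketch():
    data = tensor(np.ones(10, dtype=np.float32))
    out = F.dropout(data, 1.0 / 3.0, training=True).numpy()
    assert np.all((out == 0.0) | np.isclose(out, 1.5))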
def forward(self, inps):
    return F.dropout(inps[0], self.param["ratio"], self.param["training"])
def test_dropout_dynamic_diff_result():
    x = mge.ones(10)
    # Without reseeding, consecutive calls draw independent masks; the check
    # is probabilistic and fails only if both masks agree on all 10 elements.
    a = F.dropout(x, 0.5)
    b = F.dropout(x, 0.5)
    assert np.any(a.numpy() != b.numpy())
        [(32, 32, 16, 16, 16)],
        True,
        1000,
    ),
    (
        "convTranspose2d",
        lambda x: module_cache["ConvTranspose2d"][0](x),
        lambda x: module_cache["ConvTranspose2d"][1](x),
        [(2, 32, 16, 16)],
        [(32, 32, 128, 128)],
        True,
        1000,
    ),
    (
        "dropout",
        lambda x: MF.dropout(x, 0.5),
        TF.dropout,
        [(100, 100)],
        [(64, 512, 16, 16)],
        True,
        1000,
    ),
    (
        "dw_conv2d",
        lambda x: module_cache["dw_conv2d"][0](x),
        lambda x: module_cache["dw_conv2d"][1](x),
        [(2, 32, 16, 16)],
        [(32, 32, 128, 128)],
        True,
        1000,
    ),