Code example #1
import random

import torch

# TestArch and build_optimizer_mapper are defined in the surrounding
# d2go test module; cfg is a d2go/detectron2-style config node.


def _test_each_optimizer(cfg):
    print("Solver: " + str(cfg.SOLVER.OPTIMIZER))

    model = TestArch()
    criterion = torch.nn.BCEWithLogitsLoss()
    optimizer = build_optimizer_mapper(cfg, model)

    # Train on a synthetic, easily separable task: the input is shifted
    # by 2 * target, so any working optimizer should fit it quickly.
    random.seed(20210912)
    for _ in range(2500):
        target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1))
        x = torch.add(torch.rand(1, 3, 16, 16), 2 * target)
        y_pred = model(x)
        loss = criterion(y_pred, target)
        optimizer.zero_grad()  # reset gradients each step instead of accumulating
        loss.backward()
        optimizer.step()

    # Evaluate: threshold the sigmoid output and count exact matches.
    n_correct = 0
    with torch.no_grad():
        for _ in range(200):
            target = torch.empty(1, 1, 1, 1).fill_(random.randint(0, 1))
            x = torch.add(torch.rand(1, 3, 16, 16), 2 * target)
            y_pred = torch.round(torch.sigmoid(model(x)))
            if y_pred == target:
                n_correct += 1

    print("Correct prediction rate {0}.".format(n_correct / 200))
Code example #2
    def test_create_optimizer_weight_decay_norm(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1)
                self.bn = torch.nn.BatchNorm2d(3)

            def forward(self, x):
                return self.bn(self.conv(x))

        model = Model()
        cfg = get_optimizer_cfg(lr=1.0,
                                weight_decay=1.0,
                                weight_decay_norm=2.0,
                                weight_decay_bias=1.0)
        optimizer = build_optimizer_mapper(cfg, model)

        # Expect two groups: conv.weight and conv.bias share
        # weight_decay=1.0 (weight_decay_bias equals weight_decay, so
        # biases are not split into their own group), while the BatchNorm
        # parameters get weight_decay_norm=2.0.
        self.assertEqual(len(optimizer.param_groups), 2)

        _check_param_group(self,
                           optimizer.param_groups[0],
                           num_params=2,
                           lr=1.0,
                           weight_decay=1.0)
        _check_param_group(self,
                           optimizer.param_groups[1],
                           num_params=2,
                           lr=1.0,
                           weight_decay=2.0)
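
The grouping asserted above mirrors what one would write by hand in plain PyTorch. The sketch below reproduces the two expected groups without d2go, purely to make the behavior concrete.

import torch

model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 3, 1),
    torch.nn.BatchNorm2d(3),
)
conv, bn = model[0], model[1]
# conv.weight and conv.bias share weight_decay=1.0; the BatchNorm
# ("norm") parameters get weight_decay=2.0, matching the two param
# groups checked in the test.
optimizer = torch.optim.SGD(
    [
        {"params": [conv.weight, conv.bias], "weight_decay": 1.0},
        {"params": [bn.weight, bn.bias], "weight_decay": 2.0},
    ],
    lr=1.0,
)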
Code example #3
import torch

# TestArch and build_optimizer_mapper come from the surrounding test module.


def _test_each_optimizer(cfg):
    # Smoke test: build the optimizer from the config and run a few
    # forward/backward/step iterations on random input.
    model = TestArch()
    optimizer = build_optimizer_mapper(cfg, model)
    for _ in range(10):
        x = torch.rand(1, 3, 24, 24)
        y = model(x)
        loss = y.mean()
        optimizer.zero_grad()  # clear gradients each iteration
        loss.backward()
        optimizer.step()
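
Either variant of _test_each_optimizer is meant to be called once per optimizer under test. A hypothetical driver is sketched below; get_optimizer_cfg appears in the other examples on this page, but the optimizer names and the mutability of cfg are assumptions.

# Hypothetical driver: reuse the helper for each optimizer name.
for name in ["sgd", "adamw"]:
    cfg = get_optimizer_cfg(lr=0.01)
    cfg.SOLVER.OPTIMIZER = name
    _test_each_optimizer(cfg)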
Code example #4
    def test_create_optimizer_custom_ddp(self):
        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 3, 1)
                self.bn = torch.nn.BatchNorm2d(3)

            def forward(self, x):
                return self.bn(self.conv(x))

            def get_optimizer_param_groups(self, _opts):
                ret = [{
                    "params": [self.conv.weight],
                    "lr": 10.0,
                }]
                return ret

        model = Model()
        # Wrap in DDP (the surrounding test is assumed to have initialized
        # a process group); the builder should still reach the model's
        # get_optimizer_param_groups hook through the wrapper.
        model = torch.nn.parallel.DistributedDataParallel(model)
        cfg = get_optimizer_cfg(lr=1.0,
                                weight_decay=1.0,
                                weight_decay_norm=0.0)
        optimizer = build_optimizer_mapper(cfg, model)

        # Expect three groups: conv.weight from the custom hook (lr=10.0),
        # conv.bias with the default settings, and the two BatchNorm
        # parameters with weight_decay_norm=0.0.
        self.assertEqual(len(optimizer.param_groups), 3)

        _check_param_group(self,
                           optimizer.param_groups[0],
                           num_params=1,
                           lr=10.0,
                           weight_decay=1.0)
        _check_param_group(self,
                           optimizer.param_groups[1],
                           num_params=1,
                           lr=1.0,
                           weight_decay=1.0)
        _check_param_group(self,
                           optimizer.param_groups[2],
                           num_params=2,
                           lr=1.0,
                           weight_decay=0.0)
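
The assertions above imply that build_optimizer_mapper looks through the DDP wrapper to reach the model's get_optimizer_param_groups hook. The sketch below shows that unwrapping pattern; the helper names are illustrative, not d2go's actual internals.

import torch

def _unwrap_ddp(model):
    # DDP keeps the original network on .module; look through the
    # wrapper so model-defined hooks stay reachable.
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        return model.module
    return model

def _custom_param_groups(model, opts=None):
    model = _unwrap_ddp(model)
    if hasattr(model, "get_optimizer_param_groups"):
        return model.get_optimizer_param_groups(opts)
    return []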
Code example #5
File: default_runner.py    Project: ananthsub/d2go
    def build_optimizer(self, cfg, model):
        # Delegate optimizer construction to the shared mapper-based builder.
        return build_optimizer_mapper(cfg, model)
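
Other runners can override the same hook to customize optimizer construction. A hypothetical subclass is sketched below; the import paths are assumptions based on the d2go layout, and the subclass itself is illustrative.

from d2go.optimizer import build_optimizer_mapper
from d2go.runner.default_runner import Detectron2GoRunner

class MyRunner(Detectron2GoRunner):
    def build_optimizer(self, cfg, model):
        # A subclass could adjust cfg here before delegating to the
        # shared mapper-based builder, as default_runner.py does.
        return build_optimizer_mapper(cfg, model)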