Example #1
    def __init__(self, encode_size, hidden_size, num_layers, bs, output_size):
        super().__init__()
        self.encode_size = encode_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bs = bs
        self.output_size = output_size

        # Frozen ResNet-34 backbone. AdaptiveConcatPool2d concatenates average
        # and max pooling, doubling the 512 final-conv channels to 1024.
        resnet_model = resnet34(pretrained=True)
        encoder_layers = list(resnet_model.children())[:8] + [
            AdaptiveConcatPool2d(), Flatten()
        ]
        self.encoder = nn.Sequential(*encoder_layers).cuda()
        for param in self.encoder.parameters():
            param.requires_grad = False
        set_trainable(self.encoder, False)  # fastai fit bug

        # Trainable head mapping the 1024-dim pooled features to encode_size.
        self.encoder_linear = nn.Sequential(nn.BatchNorm1d(1024),
                                            nn.Dropout(p=0.25),
                                            nn.Linear(1024, 512), nn.ReLU(),
                                            nn.BatchNorm1d(512),
                                            nn.Dropout(p=0.5),
                                            nn.Linear(512,
                                                      encode_size)).cuda()
        set_trainable(self.encoder_linear, True)  # fastai fit bug

        # LSTM over sequences of encoded feature vectors, plus output layer.
        self.lstm = nn.LSTM(encode_size, hidden_size, num_layers).cuda()
        self.h, self.c = self.init_hidden()
        set_trainable(self.lstm, True)  # fastai fit bug

        self.linear = nn.Linear(hidden_size, output_size).cuda()
        set_trainable(self.linear, True)  # fastai fit bug

        self.init_weights()
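
Every snippet here pairs direct `requires_grad` changes with a call to fastai's `set_trainable`; the recurring `# fastai fit bug` comments apparently refer to old fastai's `fit` consulting a per-module `trainable` flag in addition to `requires_grad`. As a rough sketch of what the call does, assuming fastai 0.7-era behaviour rather than quoting the library source:

# Sketch only: walk every submodule, tag it with a `trainable` attribute,
# and flip requires_grad on all of its parameters to match.
def set_trainable_attr(module, trainable):
    module.trainable = trainable
    for p in module.parameters():
        p.requires_grad = trainable

def apply_leaf(module, fn):
    fn(module)
    for child in module.children():
        apply_leaf(child, fn)

def set_trainable(layer, trainable):
    apply_leaf(layer, lambda m: set_trainable_attr(m, trainable))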
Example #2
    def __init__(self):
        super().__init__()

        # Frozen ResNet-34 backbone with concat pooling (512 -> 1024 features).
        resnet_model = resnet34(pretrained=True)
        encoder_layers = list(resnet_model.children())[:8] + [
            AdaptiveConcatPool2d(), Flatten()
        ]
        self.encoder = nn.Sequential(*encoder_layers).cuda()
        for param in self.encoder.parameters():
            param.requires_grad = False
        set_trainable(self.encoder, False)  # fastai fit bug

        # Trainable 14-way classifier head, initialised with Kaiming normal.
        self.linear = nn.Sequential(nn.BatchNorm1d(1024), nn.Dropout(p=0.25),
                                    nn.Linear(1024, 512), nn.ReLU(),
                                    nn.BatchNorm1d(512), nn.Dropout(p=0.5),
                                    nn.Linear(512, 14)).cuda()
        apply_init(self.linear, kaiming_normal)
        set_trainable(self.linear, True)  # fastai fit bug
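
Both `__init__` excerpts above come from larger nn.Module subclasses whose names are not shown. For illustration only (the class name ResnetClassifier is hypothetical, not from the source), a quick sanity check that the freeze/unfreeze split holds after construction:

# Hypothetical class name for the module defined above.
model = ResnetClassifier()
# The backbone is frozen, the head is not.
assert all(not p.requires_grad for p in model.encoder.parameters())
assert all(p.requires_grad for p in model.linear.parameters())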
Example #3
import torch
from fastai import core


def test_set_trainable():
    layer1 = torch.nn.Linear(2, 2)
    layer2 = torch.nn.Linear(2, 1)
    model = torch.nn.Sequential(layer1, layer2)

    # Two Linear layers -> four parameter tensors (a weight and a bias each).
    params_require_grad_before = [
        param for param in model.parameters() if param.requires_grad
    ]

    core.set_trainable(model, False)

    params_require_grad_after = [
        param for param in model.parameters() if param.requires_grad
    ]

    assert len(params_require_grad_before) == 4
    assert len(params_require_grad_after) == 0

    # set_trainable also tags every module with a `trainable` attribute.
    assert model.trainable is False
    assert layer1.trainable is False
    assert layer2.trainable is False
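
The helper is symmetric, so the reverse call restores gradients everywhere. A short continuation of the test above (not part of the original) could check that:

core.set_trainable(model, True)
assert all(p.requires_grad for p in model.parameters())
assert model.trainable is True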
Example #4
# Inspect one batch and the learner's layer groups.
next(iter(trn_dl))[0].size()

learner.get_layer_groups()

from fastai.core import set_trainable

# Freeze the whole model, then unfreeze only the first and last layer groups.
set_trainable(learner.model, False)
set_trainable(learner.get_layer_groups()[0], True)
set_trainable(learner.get_layer_groups()[-1], True)

# The intermediate blocks stay frozen...
assert learner.model[0].blocks[0].trainable is False
assert learner.model[0].blocks[0].attn.c_proj.weight.requires_grad is False
# ...while the output layer and the embedding are trainable again.
assert learner.model[1].weight.requires_grad is True
assert learner.model[1].trainable is True
assert learner.model[0].embed.trainable is True
assert learner.model[0].embed.weight.requires_grad is True

lr = 1e-3
lrs = lr
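
Here `lrs` is just the scalar `lr`, but fastai 0.7's `fit` also accepts one learning rate per layer group, which pairs naturally with the partial unfreezing above. A common pattern (an assumption about the surrounding notebook, not shown here) would be:

import numpy as np

# Hypothetical alternative: one learning rate per layer group, with earlier,
# more general groups trained more gently than the freshly unfrozen head.
lrs = np.array([lr / 10, lr / 3, lr])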