Example #1
import torch

# `input_size` and `state_dict_path`, as well as the project-specific model
# classes used below, are assumed to be defined/imported in the surrounding code.
kernel_size = 3
stride = 1
n_fmaps = 16  # fixed in the model class
n_latent = 2048
model = AE_Encoder_Classifier(
    Encoder_4_sampling_bn_1px_deep_convonly_skip(input_size,
                                                 kernel_size,
                                                 stride,
                                                 n_latent=n_latent),
    Classifier3Layered(n_latent=n_latent))

# Load the pretrained checkpoint, copy its weights into the encoder, and freeze
# every parameter whose name matches the `encoding_conv` pattern so those
# layers are excluded from training.
checkpoint = torch.load(state_dict_path,
                        map_location=lambda storage, loc: storage)
state_dict = checkpoint['model_state_dict']
model.load_encoder_state_dict(state_dict)
model.freeze_encoder_weights(expr=r'^.*\.encoding_conv.*$')
model.reset_state()

# Verify which parameters are still trainable after freezing.
for name, param in model.named_parameters():
    print(name, param.requires_grad)
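
# For reference: regex-based freezing like the call above can be done with
# plain PyTorch. Illustrative sketch only -- `freeze_matching` is a
# hypothetical helper, not the project's implementation.
import re

def freeze_matching(module, pattern):
    """Disable gradients for all parameters whose name matches `pattern`."""
    for name, param in module.named_parameters():
        if re.match(pattern, name):
            param.requires_grad = False

# e.g. freeze_matching(model, r'^.*\.encoding_conv.*$')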

# NLLLoss expects log-probabilities, so the classifier output is assumed to be
# log-softmax'd.
criterion = torch.nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=7.5e-7)

num_epoch = 700
log_int = 5
device = 'cuda'
gpu_id = 0
save = True
save_int = 25
resume = False
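
The hyperparameters above are normally handed to the project's Trainer (see Example #2). Purely as an illustration of how they fit together, a minimal manual training loop could look like the sketch below; `train_loader` is a hypothetical DataLoader yielding `(inputs, labels)` batches, and the model is assumed to return log-probabilities as required by NLLLoss.

# Minimal, illustrative training loop (not the project's Trainer).
dev = torch.device(f'{device}:{gpu_id}' if device == 'cuda' else device)
model = model.to(dev)
for epoch in range(num_epoch):
    running_loss = 0.0
    for inputs, labels in train_loader:          # hypothetical DataLoader
        inputs, labels = inputs.to(dev), labels.to(dev)
        optimizer.zero_grad()
        log_probs = model(inputs)                # assumed log-probabilities
        loss = criterion(log_probs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    if epoch % log_int == 0:
        print(f'epoch {epoch}: mean loss {running_loss / len(train_loader):.6f}')
    if save and epoch % save_int == 0:
        torch.save({'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict()},
                   f'checkpoint_{epoch:04d}.pt')
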
Example #2
# As in Example #1, `input_size` and `state_dict_path` are assumed to be
# defined elsewhere; the encoder and classifier classes differ from Example #1.
kernel_size = 3
stride = 1
n_fmaps = 16  # fixed in the model class
n_latent = 2048
model = AE_Encoder_Classifier(
    Encoder_4_sampling_1px_deep_convonly_skip(input_size,
                                              kernel_size,
                                              stride,
                                              n_latent=n_latent),
    Classifier(n_latent=n_latent))

# Load the pretrained weights into the encoder and freeze the encoder
# parameters (no regex given, so the method's default selection is used).
checkpoint = torch.load(state_dict_path,
                        map_location=lambda storage, loc: storage)
state_dict = checkpoint['model_state_dict']
model.load_encoder_state_dict(state_dict)
model.freeze_encoder_weights()
model.reset_state()

# Verify which parameters are still trainable after freezing.
for name, param in model.named_parameters():
    print(name, param.requires_grad)

# Mean-squared-error loss with plain SGD + momentum (Example #1 used NLLLoss/Adam).
criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.8)

num_epoch = 10
log_int = 1
device = 'cpu'
save = True
resume = False
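
`resume` is disabled here; when it is enabled, a common pattern is to restore model and optimizer state from a checkpoint written by a previous run. The sketch below is purely illustrative (the project's Trainer may handle this differently); `resume_ckpt_path` is a hypothetical path to a checkpoint that also stores optimizer state and the last epoch.

# Illustrative resume pattern only; `resume_ckpt_path` is hypothetical.
if resume:
    ckpt = torch.load(resume_ckpt_path, map_location='cpu')
    model.load_state_dict(ckpt['model_state_dict'])
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    start_epoch = ckpt.get('epoch', 0) + 1
else:
    start_epoch = 0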

trainer = Trainer(run_root=run_root,