Example 1
            'conv2_1': 9,
            'conv2_2': 12,
            'conv3_1': 16,
            'conv3_2': 19,
            'conv3_3': 22,
            'conv3_4': 25,
            'conv4_1': 29,
        })
    torch.save(e4.state_dict(), 'pth_models/vgg_normalised_conv4.pth')

    ## VGGDecoder4
    inv4 = load_lua('models/feature_invertor_conv4_1_mask.t7')
    d4 = VGGDecoder(4)
    weight_assign(
        inv4, d4, {
            'conv4_1': 1,
            'conv3_4': 5,
            'conv3_3': 8,
            'conv3_2': 11,
            'conv3_1': 14,
            'conv2_2': 18,
            'conv2_1': 21,
            'conv1_2': 25,
            'conv1_1': 28,
        })
    torch.save(d4.state_dict(), 'pth_models/feature_invertor_conv4.pth')

    p_wct = PhotoWCT()
    photo_wct_loader(p_wct)
    torch.save(p_wct.state_dict(), 'PhotoWCTModels/photo_wct.pth')
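
Both conversion snippets rely on a weight_assign(src, dst, mapping) helper that is not shown here. A minimal sketch of what such a helper could look like, assuming the legacy Torch7 container returned by load_lua exposes its layers through a .get(index) accessor and that the destination PyTorch module names its conv layers exactly as in the mapping (both are assumptions, not confirmed by the snippet):

import torch

def weight_assign(src, dst, mapping):
    # Copy weight and bias of each Torch7 SpatialConvolution (looked up by its
    # position in the legacy sequential container) into the correspondingly
    # named conv layer of the PyTorch module.
    for name, idx in mapping.items():
        lua_conv = src.get(idx)        # hypothetical accessor on the legacy container
        pth_conv = getattr(dst, name)  # e.g. d4.conv3_1
        pth_conv.weight = torch.nn.Parameter(lua_conv.weight.float())
        pth_conv.bias = torch.nn.Parameter(lua_conv.bias.float())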
Example 2
        'conv1_2': 5,
        'conv2_1': 9,
        'conv2_2': 12,
        'conv3_1': 16,
        'conv3_2': 19,
        'conv3_3': 22,
        'conv3_4': 25,
        'conv4_1': 29,
    })
    torch.save(e4.state_dict(), 'pth_models/vgg_normalised_conv4.pth')
    
    ## VGGDecoder4
    inv4 = load_lua('models/feature_invertor_conv4_1_mask.t7')
    d4 = VGGDecoder(4)
    weight_assign(inv4, d4, {
        'conv4_1': 1,
        'conv3_4': 5,
        'conv3_3': 8,
        'conv3_2': 11,
        'conv3_1': 14,
        'conv2_2': 18,
        'conv2_1': 21,
        'conv1_2': 25,
        'conv1_1': 28,
    })
    torch.save(d4.state_dict(), 'pth_models/feature_invertor_conv4.pth')
    
    p_wct = PhotoWCT()
    photo_wct_loader(p_wct)
    torch.save(p_wct.state_dict(), 'PhotoWCTModels/photo_wct.pth')
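
After the conversion, the saved state dict can be reloaded with the standard PyTorch API. A minimal usage sketch (the PhotoWCT import path is assumed and must match the project layout):

import torch
# from photo_wct import PhotoWCT  # import path assumed

p_wct = PhotoWCT()
p_wct.load_state_dict(torch.load('PhotoWCTModels/photo_wct.pth'))
p_wct.eval()  # inference mode for stylization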
Example 3
set_parameter_requires_grad(model.e2, False)
set_parameter_requires_grad(model.e3, False)
set_parameter_requires_grad(model.e4, False)

# transfer model to GPU if you have one
model.to(device)

# set criterion to reconstruction loss & define optimizer
criterion = WeightedMseContentLoss(content_loss_weight=1, mse_loss_weight=1700)
# criterion = ContentLoss()  # or nn.MSELoss(): alternatives to the combined loss above

lr = 0.0001
optimizer = optim.Adam(model.parameters(), lr=lr)

print("Criterion: MSE & Perceptual loss 'r42', weights 1700 & 1 respectively")
print('Optimizer: Adam | Learning rate = %f' % lr)

t0 = perf_counter()
train_model(model, data_loader, criterion, optimizer)
t1 = perf_counter()
time_elapsed = t1 - t0
t_hrs = time_elapsed // 3600
t_min = (time_elapsed - t_hrs * 3600) // 60
t_sec = (time_elapsed - t_hrs * 3600 - t_min * 60)

print("Time elapsed = %d hours, %d minutes and %d seconds (%d seconds)" %
      (t_hrs, t_min, t_sec, time_elapsed))
# save the model
save_dir = './PhotoWCTModels/'
torch.save(model.state_dict(), save_dir + 'cyclic_photo_wct_2maps.pth')
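
The training snippet freezes the pretrained encoders through a set_parameter_requires_grad helper that is not shown. A minimal sketch of such a helper, assuming it simply toggles gradient tracking on every parameter of the given module:

def set_parameter_requires_grad(module, requires_grad):
    # Freeze (or unfreeze) a module by toggling gradient tracking on its parameters.
    for param in module.parameters():
        param.requires_grad = requires_grad

With the encoders frozen this way, optim.Adam(model.parameters(), lr=lr) still receives the frozen parameters, but they get no gradients and are skipped during updates; filtering with filter(lambda p: p.requires_grad, model.parameters()) is a common alternative.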