Code example #1 (score: 0)
File: config.py — Project: dsparber/shapemodeling
def get_model():
    """Load the pretrained Autoencoder checkpoint and move it to the best device.

    Relies on the module-level ``high_res`` flag to pick the vertex count and
    the matching checkpoint file.

    Returns:
        Autoencoder: the model with loaded weights, placed on CUDA if
        available, otherwise CPU.
    """
    # Vertex counts for the two mesh resolutions used by the project.
    n = 22779 if high_res else 2319
    saved_model = 'model_high_res.pt' if high_res else 'model.pt'

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = Autoencoder(n)
    # map_location makes a GPU-saved checkpoint loadable on CPU-only hosts;
    # without it torch.load raises when CUDA is unavailable.
    model.load_state_dict(torch.load(saved_model, map_location=device))
    model = model.to(device)
    return model
Code example #2 (score: 0)
File: train.py — Project: dsparber/shapemodeling
# Checkpoint to resume training from; set to None to train from scratch.
pretrained_model = 'checkpoints/model-300.pt'

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # data_path / batch_size / learning_rate / epochs are expected to be
    # module-level config values — TODO confirm where they are defined.
    dataset = FaceDataset(data_path)
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=batch_size,
                                              shuffle=True)

    num_points = dataset.num_points()

    model = Autoencoder(num_points)
    if pretrained_model is not None:
        # map_location allows resuming a GPU-trained checkpoint on a
        # CPU-only machine (and vice versa).
        model.load_state_dict(torch.load(pretrained_model,
                                         map_location=device))
    model = model.to(device)
    model.train()  # ensure training-mode behavior for dropout/batchnorm layers

    loss_fn = MSELoss()
    optimizer = torch.optim.Adamax(model.parameters(),
                                   lr=learning_rate,
                                   eps=1e-7)

    for epoch in range(epochs):
        for batch_id, x in enumerate(data_loader):
            x = x.to(device)
            # Call the module, not model.forward(), so registered hooks run.
            x_hat = model(x)
            # Conventional (input, target) argument order; MSE value is
            # symmetric, so the computed loss is unchanged.
            loss = loss_fn(x_hat, x)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
Code example #3 (score: 0)
import torch
import glob
import os
import argparse
from model import Autoencoder

# One-off conversion script: re-save the checkpoint with CPU tensors so the
# file can be loaded on machines without CUDA.
model = Autoencoder(2319)  # 2319 vertices — the low-resolution mesh
# map_location='cpu' is required here: without it, loading a GPU-saved
# checkpoint fails on the CPU-only hosts this script is meant to serve.
model.load_state_dict(torch.load('model.pt', map_location='cpu'))
model = model.to('cpu')
# Overwrites the original file in place with CPU-mapped weights.
torch.save(model.state_dict(), 'model.pt')