Code Example #1
import cv2
import torch
import numpy as np

import torch.nn as nn
import torch.nn.functional as F
from utils.tensorboard import TensorBoard
from Renderer.model import FCN
from Renderer.stroke_gen import *

import torch.optim as optim

writer = TensorBoard("../train_log/191104_renderer/")

criterion = nn.MSELoss()
net = FCN()
optimizer = optim.Adam(net.parameters(), lr=3e-6)
batch_size = 64

use_cuda = torch.cuda.is_available()
step = 0


def save_model():
    if use_cuda:
        net.cpu()
    # torch.save(net.state_dict(), "../renderer.pkl")
    torch.save(net.state_dict(), "model/renderer.pkl")
    if use_cuda:
        net.cuda()
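
The loop that actually uses this setup is not shown above. Below is a minimal sketch of how the pieces could fit together, assuming that draw(f) from Renderer.stroke_gen renders a target stroke image from a random parameter vector and that the TensorBoard wrapper exposes an add_scalar method; the parameter dimension, step limit, and save interval are illustrative values, not taken from the original.

# Minimal training-loop sketch (assumptions noted above): sample random stroke
# parameters, render the ground truth with draw(), and regress the FCN onto it.
while step < 500000:
    net.train()
    train_batch = []
    ground_truth = []
    for _ in range(batch_size):
        f = np.random.uniform(0, 1, 10)   # stroke parameter vector (dimension assumed)
        train_batch.append(f)
        ground_truth.append(draw(f))      # rendered target image

    train_batch = torch.tensor(np.array(train_batch)).float()
    ground_truth = torch.tensor(np.array(ground_truth)).float()
    if use_cuda:
        net.cuda()
        train_batch = train_batch.cuda()
        ground_truth = ground_truth.cuda()

    gen = net(train_batch)                # predicted rendering from the FCN
    optimizer.zero_grad()
    loss = criterion(gen, ground_truth)   # pixel-wise MSE against the rendered target
    loss.backward()
    optimizer.step()

    writer.add_scalar("train/loss", loss.item(), step)  # assumes the wrapper has add_scalar
    if step % 1000 == 0:
        save_model()
    step += 1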

Code Example #2
                      transform=img_transform)  # cluster
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# AutoEncoder model
model = AutoEncoder().to(device)
#if os.path.exists('./AutoEncoder.pth'):
#    model.load_state_dict(torch.load('/home/so1463/LearningToPaint/baseline/AutoEncoder.pth'))

###############################
# Alternatively, wrap the save_image call in a torch.no_grad() block inside the
# training loop (demonstrated in the sketch at the end of this section).

# Freeze the weights of the pretrained renderer
renderer = FCN().to(device)
renderer.load_state_dict(torch.load(args.renderer))
renderer.eval()
for p in renderer.parameters():
    p.requires_grad = False  # frozen: no weight updates, but gradients can still flow back to the model

# Define optimizer and loss function
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss(reduction='sum')

loss_plot = []
###############################################################################

#################################
# Training ######################
#################################
imgid = 1
for epoch in range(num_epochs):  # each training epoch
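
The epoch body is not part of this snippet. Below is a minimal sketch of what one reconstruction-training epoch could look like, assuming each batch from dataloader yields an image tensor (or an (image, label) pair), that the loss is the summed-MSE criterion defined above, and that save_image comes from torchvision.utils. How the frozen renderer enters the forward pass is not shown in the snippet, so it is omitted here; the output filename is a placeholder.

# Hypothetical sketch of a single training epoch (not taken from the original code).
epoch_loss = 0.0
for batch in dataloader:
    # Some datasets yield (image, label) pairs; keep only the image tensor.
    img = batch[0] if isinstance(batch, (list, tuple)) else batch
    img = img.to(device)

    recon = model(img)               # AutoEncoder forward pass
    loss = criterion(recon, img)     # summed MSE reconstruction loss

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    epoch_loss += loss.item()

loss_plot.append(epoch_loss / len(dataloader.dataset))

# Save one reconstruction for inspection without tracking gradients,
# as the earlier comment about wrapping save_image in no_grad suggests.
with torch.no_grad():
    save_image(recon.cpu(), './recon_{}.png'.format(imgid))  # placeholder output path
    imgid += 1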