def train_encoder():
    from AutoEncoder import AutoEncoder

    device = torch.device("cuda")
    enc = AutoEncoder(3).to(device)

    criterion = nn.MSELoss()
    optimizer = optim.Adam(enc.parameters(), lr=ETA)

    loader = get_cifar10()

    for e in range(EPOCHS):
        train_loss = 0.0

        for images, _ in loader:
            images = images.to(device)

            _, decoded = enc(images)
            assert decoded.size() == images.size()
            loss = criterion(decoded, images)
            train_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        train_loss /= len(loader)
        print(f"Epochs {e+1}/{EPOCHS}")
        print(f"Loss: {train_loss:.8f}")

    enc.save("ckpts/encoder_test.pth")
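
# The helpers used by train_encoder (AutoEncoder, get_cifar10) and the
# constants ETA and EPOCHS are not shown in this snippet. A minimal sketch of
# what they might look like, assuming a simple convolutional autoencoder over
# 3-channel CIFAR-10 images and standard torchvision loading; the actual
# classes in this project may differ:
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

ETA = 1e-3      # assumed learning rate
EPOCHS = 20     # assumed number of training epochs


class AutoEncoder(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        # encode 32x32 images into a small latent feature map
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
        )
        # decode back to the original resolution
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1), nn.ReLU(),
            nn.ConvTranspose2d(16, in_channels, 4, stride=2, padding=1), nn.Sigmoid(),
        )

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded

    def save(self, path):
        torch.save(self.state_dict(), path)


def get_cifar10(batch_size=64):
    dataset = torchvision.datasets.CIFAR10(
        root="./data", train=True, download=True,
        transform=transforms.ToTensor())
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
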
#if os.path.exists('./AutoEncoder.pth'):
#    model.load_state_dict(torch.load('/home/so1463/LearningToPaint/baseline/AutoEncoder.pth'))

# Define our RNN model
rnn = RNN().to(device)


# Freeze weights of the renderer
renderer = FCN().to(device)
renderer.load_state_dict(torch.load(args.renderer))
renderer.eval()
for p in renderer.parameters():
    p.requires_grad = False  # the renderer stays fixed during training

# Define optimizer and loss function
optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
criterion = nn.MSELoss(reduction='sum')

loss_plot = []
###############################################################################


#################################
# Training ######################
#################################
imgid = 1
for epoch in range(num_epochs): # each training epoch

    for data in dataloader: # for each image
        image, _ = data
# Example 3
transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    # torchvision.transforms.Normalize(0, 1),
])
train_data = torchvision.datasets.MNIST(
    root='/home/slcheng/AutoEncoder/mnist',
    train=True,
    transform=transform,
    download=DOWNLOAD_MNIST,
)
train_loader = Data.DataLoader(dataset=train_data,
                               batch_size=args.batch_size,
                               shuffle=True)
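
# Optional sanity check: pull one batch to confirm the loader yields
# (batch_size, 1, 28, 28) image tensors and matching labels. This line is not
# part of the original example and assumes args.batch_size is already parsed.
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # e.g. torch.Size([64, 1, 28, 28]) torch.Size([64])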

# load model
autoencoder = AutoEncoder()
optimizer = torch.optim.Adam(autoencoder.parameters(), lr=args.learning_rate)
mse_loss = nn.MSELoss()


# training process
def train():
    fig, ax = plt.subplots(2, N)
    ln, = plt.plot([], [], animated=True)

    # randomly sample N images to visualize
    train_num = train_data.data.shape[0]
    random.seed(train_num)
    indices = random.sample(range(train_num), N)
    origin_image = torch.empty(N, 28 * 28)
    for i in range(N):
        origin_image[i] = train_data.data[indices[i]].view(28 * 28).float()
# Example 4
N = 400
D = 20
L = 9
R = np.array(np.random.rand(N, L) > .5, dtype=np.float32)

# train_loader = torch.utils.data.DataLoader(
#     datasets.MNIST('../data', train=True, download=True,
#                    transform=transforms.ToTensor()),
#     batch_size=args.batch_size, shuffle=True, **kwargs)
# test_loader = torch.utils.data.DataLoader(
#     datasets.MNIST('../data', train=False, transform=transforms.ToTensor()),
#     batch_size=args.batch_size, shuffle=True, **kwargs)

# model = VAE().to(device)
model = AutoEncoder(9, 3).to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)


# Reconstruction loss summed over all elements and batch (the KL divergence
# term from the VAE objective is commented out below)
def loss_function(recon_x, x):
    BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')

    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    # KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())

    return BCE
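

# For reference, if the commented-out KL term above were re-enabled, the full
# VAE objective from Kingma and Welling would look roughly like the sketch
# below. It assumes the encoder also returns mu and logvar, which is not the
# case for the plain AutoEncoder used here:
def vae_loss_function(recon_x, x, mu, logvar):
    # reconstruction term, summed over all elements and the batch
    BCE = F.binary_cross_entropy(recon_x, x, reduction='sum')
    # KL divergence between q(z|x) = N(mu, sigma^2) and the unit Gaussian prior:
    # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD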

if __name__ == '__main__':
    if __DEBUG__:
        print(DEVICE)
    train = get_dataset()
    if False:
        print("Showing Random images from dataset")
        showRandomImaged(train)

    model = AutoEncoder().cuda() if torch.cuda.is_available() else AutoEncoder()
    if __DEBUG__:
        print(model)

    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), weight_decay=1e-5)

    if LOAD:
        model.load_state_dict(torch.load(PATH))

    for epoch in range(EPOCHS):
        for i, (images, _) in enumerate(train):
            images = images.to(DEVICE)
            out = model(images)
            loss = criterion(out, images)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            ## LOG
            print(f"epoch {epoch}/{EPOCHS}\nLoss : {loss.data}")
# Example 6
model = AutoEncoder().to(device)
#if os.path.exists('./AutoEncoder.pth'):
#    model.load_state_dict(torch.load('/home/so1463/LearningToPaint/baseline/AutoEncoder.pth'))

###############################
# Alternatively, use a torch.no_grad() block in the loop when calling the save_image function (a sketch follows the freeze block below)

# Freeze weights of the renderer
renderer = FCN().to(device)
renderer.load_state_dict(torch.load(args.renderer))
renderer.eval()
for p in renderer.parameters():
    p.requires_grad = False  # the renderer stays fixed during training
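
# As the comment above suggests, an alternative to freezing every parameter is
# to run the renderer under torch.no_grad() wherever its output is only needed
# for visualization (e.g. around save_image). A sketch; `strokes` and the
# output path are placeholders, not names from the original code:
def render_for_visualization(renderer, strokes, path):
    # no gradients are tracked here, so the renderer's weights cannot be
    # updated through this call even if requires_grad were left enabled
    with torch.no_grad():
        rendered = renderer(strokes)
    save_image(rendered, path)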

# Define optimizer and loss function
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss(reduction='sum')

loss_plot = []
###############################################################################

#################################
# Training ######################
#################################
imgid = 1
for epoch in range(num_epochs):  # each training epoch

    for data in dataloader:  # for each image
        optimizer.zero_grad()  # zeros the gradient, must do every iteration
        image, _ = data
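        # The example is cut off here. Based on the optimizer and the summed
        # MSE criterion defined above, and on the `_, decoded = enc(images)`
        # call in the first snippet, the rest of the per-image step presumably
        # looks roughly like this (a sketch, not the original code):
        image = image.to(device)
        _, decoded = model(image)
        loss = criterion(decoded, image)
        loss.backward()
        optimizer.step()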