# Restore training history and the visualizer position from the previous
# run's checkpointed state.  A context manager closes the file handle
# deterministically (the original left it to the garbage collector).
with open("working_model/state.json", "r") as state_file:
    state = json.load(state_file)
pred_real_history += state["pred_real"]
pred_fake_history += state["pred_fake"]
visualizer.point = state["point"]

# Warm-start all networks from the checkpoint directory instead of using
# freshly initialised weights.
if settings.WORKING_MODEL:
    print("Using model parameters in ./working_model")
    G.load_state_dict(torch.load("working_model/G.params"))
    D.load_state_dict(torch.load("working_model/D.params"))
    E.load_state_dict(torch.load("working_model/E.params"))

    # NOTE(review): the stage index 6 is hard-coded in these filenames,
    # whereas main() formats the current stage number into the same
    # pattern ("toRGB{}.params") — confirm 6 is the intended stage here.
    toRGB.load_state_dict(torch.load("working_model/toRGB6.params"))
    fromRGB.load_state_dict(torch.load("working_model/fromRGB6.params"))
    print("Loaded RGB layers too")

# Build the training-set loader.  Worker count, shuffling, pinning and
# last-batch dropping are fixed here; the batch size comes from settings.
dataset = u.get_data_set()
loader_kwargs = dict(
    num_workers=8,
    batch_size=settings.BATCH_SIZE,
    shuffle=True,
    pin_memory=True,
    drop_last=True,
)
data_loader = torch.utils.data.DataLoader(dataset, **loader_kwargs)


def update_visualization(visualizer, batch, fake, decoded, pred_fake,
                         pred_real):
    """Push the first channel of the first real sample to the visualizer.

    Parameters
    ----------
    visualizer : object exposing ``update_image(tensor, name)``.
    batch : tensor of real samples, indexed as ``batch[0][0]`` —
        assumed (N, C, H, W); TODO confirm against callers.
    fake, decoded, pred_fake, pred_real : currently unused; kept so the
        signature stays compatible with existing call sites.
    """
    # TODO move this to utils or visualizer to save code
    # Dead code removed: the original built `batch_shape` (a copy of
    # batch.shape with dim 1 forced to 1) but never used it.
    visualizer.update_image(batch[0][0].data.cpu(), "real_img")
def main():
    """Train one stage of a progressively grown GAN and checkpoint it.

    Restores state (visualizer position, running prediction averages)
    from working_model/state.json, builds G and D, optionally loads
    saved parameters, trains for settings.CHUNKS chunks of
    settings.STEPS steps, then writes the networks, the stage's RGB
    layers and the updated state back into ./working_model.
    """
    print("\nInitiating training with the following setting ----")
    print(json.dumps(vars(settings.args), sort_keys=True, indent=4))
    print("---------------------------------------------------")
    # Get utilities ---------------------------------------------------
    dataset = u.get_data_set()
    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=settings.BATCH_SIZE,
                                              shuffle=True,
                                              pin_memory=True,
                                              drop_last=True)
    visualizer = Visualizer()
    # NOTE(review): the file object returned by open() is never closed;
    # this relies on garbage collection.
    state = json.load(open("working_model/state.json", "r"))
    visualizer.point = state["point"]

    # Define networks -------------------------------------------------
    G = u.create_generator()
    D = u.create_discriminator()

    if settings.EQUALIZE_WEIGHTS:
        # Scale both networks' weights by 0.2 (presumably the
        # equalized-learning-rate trick — confirm in ws module).
        ws.scale_network(D, 0.2)
        ws.scale_network(G, 0.2)

    if settings.SPECTRAL_NORM:
        # Spectral normalization is applied to the discriminator only.
        sn.normalize_network(D, 0.2)

    if settings.WORKING_MODEL:
        print("Using model parameters in ./working_model")
        G.load_state_dict(torch.load("working_model/G.params"))
        D.load_state_dict(torch.load("working_model/D.params"))

    # Train with StageTrainer or FadeInTrainer
    # s = stage index, (c, d) = conversion depth and downscale factor
    # (names match the StageTrainer keyword arguments below).
    s, (c, d) = [settings.STAGE, settings.PROGRESSION[settings.STAGE]]
    if settings.FADE_IN:
        print("Freezing in next layer")
        # While fading in, use the NEXT stage's conversion depth, halve
        # the downscale factor, freeze G up to the current stage, and
        # train as stage s + 1.
        c = settings.PROGRESSION[settings.STAGE + 1][0]
        d = int(d / 2)
        G.freeze_until(s)
        #D.freeze_until(s)
        s += 1

    # Freeze idle layers - did not stop vlad
    #G.freeze_idle(s)
    #D.freeze_idle(s)

    stage = trainer.StageTrainer(G,
                                 D,
                                 data_loader,
                                 stage=s,
                                 conversion_depth=c,
                                 downscale_factor=d)
    # Seed the trainer's running prediction averages from the saved state.
    stage.pred_real += state["pred_real"]
    stage.pred_fake += state["pred_fake"]

    if settings.WORKING_MODEL:
        # Restore the RGB conversion layers for the (possibly bumped)
        # stage index s.
        stage.toRGB.load_state_dict(
            torch.load("working_model/toRGB{}.params".format(s)))
        stage.fromRGB.load_state_dict(
            torch.load("working_model/fromRGB{}.params".format(s)))
        print("Loaded RGB layers too")

    stage.visualize(visualizer)
    for i in range(settings.CHUNKS):
        # NOTE(review): 1337 is a hard-coded placeholder, not a real GPU
        # memory reading — do not trust this log field.
        print("Chunk {}, stage {}, fade in: {}, GPU memory {}               ".
              format(i, settings.STAGE, settings.FADE_IN, 1337))
        stage.steps(settings.STEPS)
        gc.collect()  # Prevent memory leaks (?)
        #torch.cuda.empty_cache()  - Made no difference
        # Assumes "history_real"/"history_fake" lists already exist in
        # state.json — TODO confirm; a fresh state file without them
        # would raise KeyError here.
        state["history_real"].append(float(stage.pred_real))
        state["history_fake"].append(float(stage.pred_fake))
        if settings.WORKING_MODEL:
            print("Saved timelapse visualization")
            stage.save_fake_reference_batch(visualizer.point)
        stage.visualize(visualizer)

    # Save networks
    """
    if settings.FADE_IN:
        to_rgb, from_rgb, next_to_rgb, next_from_rgb = stage.get_rgb_layers()
        print("Saving extra rgb layers, {}".format(time.ctime()))
        torch.save(next_to_rgb.state_dict(), "working_model/toRGB{}.params".format(s + 1))
        torch.save(next_from_rgb.state_dict(), "working_model/fromRGB{}.params".format(s + 1))
    else:
        to_rgb, from_rgb = stage.get_rgb_layers()
    """
    to_rgb, from_rgb = stage.get_rgb_layers()
    print("Saving rgb layers, {}".format(time.ctime()))

    torch.save(to_rgb.state_dict(), "working_model/toRGB{}.params".format(s))
    torch.save(from_rgb.state_dict(),
               "working_model/fromRGB{}.params".format(s))
    print("Saving networks, {}".format(time.ctime()))
    # Unfreeze everything before saving so the checkpoint holds the
    # full parameter sets, not just the layers trained this stage.
    G.unfreeze_all()
    D.unfreeze_all()
    torch.save(G.state_dict(), "working_model/G.params")
    torch.save(D.state_dict(), "working_model/D.params")

    # Save state
    state["point"] = visualizer.point
    state["pred_real"] = float(stage.pred_real)
    state["pred_fake"] = float(stage.pred_fake)
    print("Saving state, {}".format(time.ctime()))
    # NOTE(review): file handle from open(..., "w") is never closed;
    # the write may not be flushed promptly — relies on GC.
    json.dump(state, open("working_model/state.json", "w"))

    # Save optimizer state
    #opt_G = stage.opt_G
    #opt_D = stage.opt_D

    #print("Saving optimizer state, {}".format(time.ctime()))
    #torch.save(opt_G.state_dict(), "working_model/optG.state")
    #torch.save(opt_D.state_dict(), "working_model/optD.state")
    print("Finished with main")
import time
import json

import torch
import torch.nn as nn
import torch.nn.functional as F

import utils.datasets as datasets
import utils.visualizer as vis
import settings
import utils.utils as u

from utils.utils import cyclic_data_iterator
from torch.autograd import Variable

# Dataset selection for the regressor: default to the real data set,
# optionally replaced by (or concatenated with) a pre-generated one.
dataset = u.get_data_set()
if settings.GENERATED_PATH is not None:
    dataset = datasets.GeneratedWithMaps(settings.GENERATED_PATH)
    print("Using generated data set at: {}".format(settings.GENERATED_PATH))
    if settings.CONCAT_DATA:
        # Train on generated and real samples together.
        dataset = torch.utils.data.ConcatDataset([dataset, u.get_data_set()])
data_loader = torch.utils.data.DataLoader(dataset,
                                          batch_size=settings.BATCH_SIZE,
                                          shuffle=True,
                                          pin_memory=True,
                                          drop_last=True)

# Regressor network and its loss; BCEWithLogitsLoss implies R outputs
# raw logits (no sigmoid) — confirm in u.create_regressor().
R = u.create_regressor()
criterion = nn.BCEWithLogitsLoss()

if settings.CUDA: