Example #1
            torch.save(model.state_dict(),
                       BASE_PATH / f"model_state_dict{epoch_i}.pth")

        # if False and LOWEST_L > test_accum_loss:
        #    LOWEST_L = test_accum_loss
        #    torch.save(model.state_dict(), BASE_PATH / f"best_state_dict.pth")


if __name__ == "__main__":

    CONFIG = NOD()
    CONFIG.seed = 58329583
    CONFIG.epochs = 1000
    CONFIG.batch_size = 256
    CONFIG.learning_rate = 0.001
    CONFIG.encoder_layer_sizes = [784, 256]
    CONFIG.decoder_layer_sizes = [256, 784]
    CONFIG.latent_size = 10
    CONFIG.print_every = 100
    GLOBAL_DEVICE = global_torch_device()
    TIMESTAMP = time.time()

    LOWEST_L = inf

    CORE_COUNT = 0  # min(8, multiprocessing.cpu_count() - 1)

    GLOBAL_DEVICE = torch.device(
        "cuda" if torch.cuda.is_available() else "cpu")
    DL_KWARGS = ({
        "num_workers": CORE_COUNT,
        "pin_memory": True
    } if torch.cuda.is_available() else {})
Example #2
import time

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import MNIST
from warg import NOD

from draugr.torch_utilities import global_torch_device
from .objectives import loss_fn

# PROJECT_APP_PATH and ConditionalVAE are provided by the surrounding project;
# their imports are not part of this excerpt.
fig_root = PROJECT_APP_PATH.user_data / "cvae"

config = NOD()
config.seed = 58329583
config.epochs = 1000
config.batch_size = 256
config.learning_rate = 0.001
config.encoder_layer_sizes = [784, 256]
config.decoder_layer_sizes = [256, 784]
config.latent_size = 10
config.print_every = 100
GLOBAL_DEVICE = global_torch_device()
timestamp = time.time()
torch.manual_seed(config.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(config.seed)

vae = ConditionalVAE(
    encoder_layer_sizes=config.encoder_layer_sizes,
    latent_size=config.latent_size,
    decoder_layer_sizes=config.decoder_layer_sizes,
    num_conditions=10,
).to(global_torch_device())
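
The listing stops at the model construction. The sketch below shows one way a training step could be wired up; the forward signature vae(x, labels) -> (reconstruction, mean, log_var, z) and the call loss_fn(reconstruction, x, mean, log_var) are assumptions based on common conditional-VAE MNIST examples, and should be checked against the project's actual API.

# Illustrative training-loop sketch, not part of the original listing.
train_loader = DataLoader(
    MNIST("data", train=True, download=True, transform=transforms.ToTensor()),
    batch_size=config.batch_size,
    shuffle=True,
)
optimiser = torch.optim.Adam(vae.parameters(), lr=config.learning_rate)

for epoch in range(config.epochs):
    for iteration, (x, y) in enumerate(train_loader):
        # Flatten 28x28 MNIST images to the 784-dim vectors the encoder expects.
        x = x.view(x.size(0), -1).to(GLOBAL_DEVICE)
        y = y.to(GLOBAL_DEVICE)

        reconstruction, mean, log_var, z = vae(x, y)  # assumed return values
        loss = loss_fn(reconstruction, x, mean, log_var)  # assumed signature

        optimiser.zero_grad()
        loss.backward()
        optimiser.step()

        if iteration % config.print_every == 0:
            print(f"epoch {epoch} iter {iteration} loss {loss.item():.4f}")

The flatten step matches encoder_layer_sizes = [784, 256] above, which treats each 28x28 image as a 784-dimensional input vector.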