Example #1
    def test_model(self, model, loss):
        data_loaders = game_data_loaders()
        train_loaders, val_loaders = data_loaders['train'], data_loaders['val']

        print('num_batches_train:', len(train_loaders))
        print('num_batches_val:', len(val_loaders))

        # fetch a single (x, y) batch once instead of rebuilding the iterator
        x, y = next(iter(train_loaders))
        print('x_batch_shape:', x.shape)
        print('y_batch_shape:', y.shape)

        # smoke test: the forward pass and the loss should run without raising
        outputs = model(x)
        loss(x, *outputs)
        self.assertEqual(True, True, msg='Equal')  # placeholder assertion
#!/usr/bin/env python
# coding: utf-8

from torch import optim

from src.args import args
from src.features_extraction import WassersteinAE, WassersteinAELossFunction
from src.utils import get_fixed_hyper_param, train_ae, get_device, game_data_loaders

# device and fixed hyper-parameters from the shared argument dict
device = get_device()
batch_size, num_of_channels, input_size, z_dim = get_fixed_hyper_param(
    args['hyper_parameters'])
reg_weight = args['wasserstein_ae']['reg_weight']

DO_TRAIN = True

# model, loss function, and optimizer
model = WassersteinAE(z_dim, num_of_channels, input_size).to(device)
loss = WassersteinAELossFunction(reg_weight)
optimizer = optim.Adam(model.parameters())

dataloaders = game_data_loaders()
train_loaders, val_loaders = dataloaders['train'], dataloaders['val']

if DO_TRAIN:
    num_epochs = int(3e3)
    train_ae(num_epochs, model, train_loaders, val_loaders,
             optimizer, device, loss)
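

# The test_model method near the top takes the model and loss as arguments, so it
# needs a driver test. A minimal sketch, assuming test_model is defined on the same
# unittest.TestCase subclass; the class name TestWassersteinAE and the method
# test_forward_pass are hypothetical, not part of the original code:
import unittest

from src.args import args
from src.features_extraction import WassersteinAE, WassersteinAELossFunction
from src.utils import get_fixed_hyper_param


class TestWassersteinAE(unittest.TestCase):
    # ... test_model(self, model, loss) from above would be defined here ...

    def test_forward_pass(self):
        _, num_of_channels, input_size, z_dim = get_fixed_hyper_param(
            args['hyper_parameters'])
        # keep the model on CPU so it matches the batches from game_data_loaders()
        model = WassersteinAE(z_dim, num_of_channels, input_size)
        loss = WassersteinAELossFunction(args['wasserstein_ae']['reg_weight'])
        self.test_model(model, loss)


if __name__ == '__main__':
    unittest.main()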