model_names = ['GoogLeNet', 'Pretrained GoogLeNet']
# models = [googlenet_pre]
# model_names = ['Pretrained GoogLeNet']

#%%
# Shared loss plus one SGD optimizer per model.
# NOTE(review): `models` is defined in an earlier cell (not visible here) —
# it must be populated before this cell runs.
criterion = nn.CrossEntropyLoss()
optimizers = [
    optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    for model in models
]

#%%
from chosen_gpu import get_freer_gpu

# Pick the least-loaded GPU when CUDA is available; otherwise fall back to CPU.
device = torch.device(
    get_freer_gpu()) if torch.cuda.is_available() else torch.device("cpu")
print("Configured device: ", device)

#%%
# FIX: the original wrote `for model in models: model = model.to(device)`,
# which rebinds only the loop variable — it worked solely because
# nn.Module.to() moves parameters in place and returns the same object.
# Rebuilding the list makes the intent explicit and stays correct even if a
# .to() call ever returns a new object.
models = [model.to(device) for model in models]
criterion = criterion.to(device)

#%%
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
from torchvision.utils import save_image
from visdom import Visdom

from modules import VAE
from train_test import train
from train_test import test
import utils

#%%
log_interval = 100
seed = 1
# Seed the global RNG so the random_split below is reproducible across runs.
torch.manual_seed(seed)

from chosen_gpu import get_freer_gpu

# FIX: the original called torch.device(get_freer_gpu()) unconditionally,
# which raises on CUDA-less machines. Guard with is_available() and fall back
# to CPU, matching the device-selection pattern used elsewhere in the project.
device = torch.device(
    get_freer_gpu()) if torch.cuda.is_available() else torch.device("cpu")
print("Configured device: ", device)

#%%
# Resize every image to 64x64 and convert to a float tensor in [0, 1].
# Normalization was deliberately left disabled — presumably because the VAE
# decoder outputs values in [0, 1]; confirm before re-enabling.
compose = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor(),
    #transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
ds = torchvision.datasets.ImageFolder(root='dataset/', transform=compose)

# 70/30 train/test split; the subtraction term guarantees the two sizes sum
# exactly to len(ds) even when len(ds) * 0.7 is not an integer.
train_size = int(len(ds) * 0.7)
ratio = [train_size, len(ds) - train_size]
train_dataset, test_dataset = torch.utils.data.random_split(ds, ratio)