def train():
    """Run a hyperparameter sweep over FashionMNIST.

    For every combination produced by ``RunBuilder.get_runs(params)``,
    trains a fresh ``finalModel`` for 10 epochs with Adam, tracking loss
    and accuracy through ``RunManager``, then saves all results under
    'results'.
    """
    train_set = torchvision.datasets.FashionMNIST(
        root='./data/FashionMNIST',
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor()]),
    )

    # Hyperparameter grid; RunBuilder expands it into individual runs.
    params = OrderedDict(
        lr=[0.01, 0.001],
        batch_size=[100, 1000],
        shuffle=[True, False],
        num_workers=[2],
    )

    runManager = RunManager()
    # Hoist the device lookup: it is loop-invariant, no need to call it
    # twice per batch.
    device = get_device_type()

    for run in RunBuilder.get_runs(params):
        # BUG FIX: the model must live on the same device as the batches.
        # Previously only the tensors were moved with .to(...), so running
        # on a GPU raised a device-mismatch error in the forward pass.
        network = finalModel().to(device)
        loader = DataLoader(
            train_set,
            batch_size=run.batch_size,
            shuffle=run.shuffle,
            num_workers=run.num_workers,
        )
        optimizer = optim.Adam(network.parameters(), lr=run.lr)

        runManager.begin_run(run, network, loader)
        for epoch in range(10):
            runManager.begin_epoch()
            for batch in loader:
                images, labels = batch
                # Support computation based on device type.
                images = images.to(device)
                labels = labels.to(device)

                preds = network(images)
                loss = F.cross_entropy(preds, labels)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                runManager.track_loss(loss)
                runManager.track_num_correct(preds, labels)
            runManager.end_epoch()
        runManager.end_run()
    runManager.save('results')
# Second sweep: split the training data, then train MyConvNet over every
# hyperparameter combination, logging through the RunManager instance `m`.
# (`splits`, `init_train_data`, `params`, and `m` are defined earlier in
# the file, outside this chunk.)
print(f"{splits}")
# NOTE(review): `val_set` is not used in this chunk — presumably consumed
# by validation code elsewhere; confirm before removing.
train_set, val_set = dataset.random_split(init_train_data, splits)

for run in RunBuilder.get_runs(params):
    network = MyConvNet()
    loader = DataLoader(
        train_set,
        batch_size=run.batch_size,
        num_workers=run.num_workers,
        shuffle=run.shuffle,
    )
    optimizer = optim.Adam(network.parameters(), lr=run.lr)

    m.begin_run(run, network, loader)
    for epoch in range(50):
        m.begin_epoch()
        for batch in loader:
            # BUG FIX: removed leftover debug `print(m.epoch_count)` that
            # wrote to stdout once per batch (thousands of lines per epoch).
            images, labels = batch

            preds = network(images)
            loss = F.cross_entropy(preds, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            m.track_loss(loss)
            m.track_num_correct(preds, labels)
        m.end_epoch()
    m.end_run()
m.save('results')