Пример #1
0
# Ensure the output directory for model artifacts exists.
# makedirs(exist_ok=True) avoids the check-then-create race of isdir()+mkdir().
os.makedirs("../model", exist_ok=True)

# --- Training configuration ---
prepared = True        # set to False to (re)build the datasets on disk first
epochs = 300
device_name = "cuda"
learning_rate = 0.001
batch_size = 32
image_size = (100, 100)
model = DistributionConvolutionModelKReluGradientMuRandom(image_size=image_size)

# Move the model to the target device before creating the optimizer so the
# optimizer tracks the on-device parameters.
device = torch.device(device_name)
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Only rebuild the datasets when they have not been prepared already.
if not prepared:
    prepare_datasets(image_size)
x_train, y_train, x_test, y_test = load_datasets()

print("Start Training")
model, history = train_model(model, criterion, optimizer, epochs, x_train, y_train, x_test, y_test, device, batch_size)

# Persist the training history for later plotting/analysis.
with open("../model/model_history.pkl", "wb") as f:
    pickle.dump(history, f)

plot_history(history)

# Save only the state dict (recommended PyTorch checkpoint practice).
torch.save({"model_state_dict": model.state_dict()}, "../model/model_final.pth")
Пример #2
0
from network import NormalConvolutionModelRelu
import pickle
from show_history import plot_history

# Ensure the output directory for model artifacts exists.
# makedirs(exist_ok=True) avoids the check-then-create race of isdir()+mkdir().
os.makedirs("../model", exist_ok=True)

# --- Training configuration ---
epochs = 300
device_name = "cuda"
learning_rate = 0.001
batch_size = 128
model = NormalConvolutionModelRelu()

# Move the model to the target device before creating the optimizer so the
# optimizer tracks the on-device parameters.
device = torch.device(device_name)
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
trainloader, testloader = get_cifar10(batch_size)

model, history = train_model(model, criterion, optimizer, epochs, trainloader,
                             testloader, device)

# Persist the training history for later plotting/analysis.
with open("../model/model_history.pkl", "wb") as f:
    pickle.dump(history, f)

plot_history(history)

# Save only the state dict (recommended PyTorch checkpoint practice).
torch.save({"model_state_dict": model.state_dict()},
           "../model/model_final.pth")
Пример #3
0
# Build train/val splits and wrap them in per-phase DataLoaders.
train_x, val_x, train_y, val_y = get_train_data(test_size=0.1, one_hot=False)
train_data = FashionMnist(data_type='train', data=(train_x, train_y))
val_data = FashionMnist(data_type='val', data=(val_x, val_y))
datasets = {'train': train_data, 'val': val_data}
dataloaders = {x: DataLoader(datasets[x], batch_size=args.batch_size, num_workers=4)
               for x in ['train', 'val']}


# Alternative architectures kept for experimentation:
# model = wrn(depth=28, num_classes=10, widen_factor=8, drop_rate=0.5)
model = resnet18()
# model = BasicModle()
# summary(model.to(device), (1, 28, 28))

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Alternative optimizers kept for experimentation:
# optimizer = optim.Adadelta(model.parameters(), weight_decay=0.02)
# optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)

# NOTE(review): the original constructed a StepLR scheduler and then
# immediately overwrote it with None, so it never ran. Kept here as a
# commented-out toggle instead of dead code; uncomment to enable it.
# exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)
exp_lr_scheduler = None
model, best_acc = train_model(model=model,
                              device=device,
                              dataloaders=dataloaders,
                              criterion=criterion, optimizer=optimizer,
                              scheduler=exp_lr_scheduler,
                              epochs=args.epochs)

# Save the best weights, tagging the filename with the achieved accuracy.
torch.save(model.state_dict(), 'result/{}_model.ckpt'.format(best_acc))