Example #1
# (the opening of this example is truncated; the imports and the dict head
#  below are a plausible reconstruction, and datasets[x] is an assumption
#  for the per-split datasets)
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

data_loader = {
    x: DataLoader(datasets[x],
                  batch_size=batch_size,
                  shuffle=True,
                  num_workers=num_workers)
    for x in ['train', 'val']
}

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),
                       lr=learning_rate,
                       weight_decay=weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                 'min',
                                                 verbose=True)
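# Note (a sketch of what the un-shown train() loop presumably does):
# ReduceLROnPlateau lowers the learning rate when a monitored metric stops
# improving, so the scheduler is expected to be stepped once per epoch with
# the validation loss, e.g. scheduler.step(val_loss).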

model = model.to(device)
model = train(model,
              data_loader,
              criterion,
              optimizer,
              scheduler,
              num_epochs=num_epoches)

n_datapoints = len(model.train_epoch_loss)

# save the results in a dictionary; exporting is recommended by setting save_csv = True
d = {
    'neuralnet': model.name,
    'lr': learning_rate,
Example #2
# Imports needed by this fragment (AlexNet, device and args are defined
# elsewhere in the original script):
from collections import OrderedDict

import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn

net = AlexNet(5)

checkpoint_path = 'cifar_10_alexnet.t7'
# map_location='cpu' lets a checkpoint saved on a GPU be loaded on a CPU-only machine
checkpoint = torch.load(checkpoint_path, map_location=torch.device('cpu'))
try:
    net.load_state_dict(checkpoint['net'])
except RuntimeError:
    # load_state_dict raises RuntimeError when the checkpoint keys carry the
    # `module.` prefix added by nn.DataParallel; strip it and retry.
    new_check_point = OrderedDict()
    for k, v in checkpoint['net'].items():
        name = k[7:]  # remove `module.`
        # name = k[9:]  # remove `module.1.`
        new_check_point[name] = v
    net.load_state_dict(new_check_point)
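# Alternative (a sketch, not part of the original script): when you control the
# saving side, storing the unwrapped weights avoids the key remapping above:
#
#     torch.save({'net': net.module.state_dict()}, checkpoint_path)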


net = net.to(device)

if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True
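# cudnn.benchmark lets cuDNN auto-tune and cache the fastest convolution
# algorithms, which helps when the input size stays fixed across batches.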



criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)

#
# # Training
# def main():
#     AV = [[] for _ in range(5)]
#     MAV = [[] for _ in range(5)]
Example #3
import time

import torch.nn as nn
import torch.optim as optim
# SummaryWriter is assumed to come from torch.utils.tensorboard here
from torch.utils.tensorboard import SummaryWriter

from alexnet import AlexNet
from utils import cifar10_loader, device

trainloader = cifar10_loader(train=True)
testloader = cifar10_loader(train=False)
writer = SummaryWriter("./logs")
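# Anything logged through this SummaryWriter (e.g. writer.add_scalar(...)) ends
# up under ./logs and can be viewed with `tensorboard --logdir ./logs`.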

epochs = 100
batch_size = 128
log_batch = 200
train_metrics = []
test_metrics = []

net = AlexNet()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)


def train():
    for epoch in range(epochs):
        running_loss = 0.0
        correct_classified = 0
        total = 0
        start_time = time.time()
        for i, data in enumerate(trainloader):
            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)