def main():
    """Build the model, optionally resume from a checkpoint, and run the
    train/test loop for ``args.maxepoch`` epochs.

    Relies on module-level names defined elsewhere in this file:
    ``args``, ``TMP_DIR``, ``Net``, ``weights_init``, ``train``, ``test``,
    ``save_checkpoint`` and ``Logger``.
    """
    model = Net()
    if torch.cuda.is_available():
        model.cuda()
    model.apply(weights_init)

    # Optionally resume from a previously saved checkpoint.
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading is handled inside train(); no DataParser is needed here.
    # NOTE(review): loss_function is currently unused in this function —
    # presumably train() builds its own loss; confirm before removing.
    loss_function = nn.L1Loss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9,
                          weight_decay=5e-4)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize,
                                    gamma=args.gamma)

    # BUG FIX: the log file was labelled 'Adam' although the optimizer is
    # SGD, and the old '%d' format truncated fractional learning rates to 0.
    log = Logger(join(TMP_DIR, 'SGD-%s-log.txt' % args.lr))
    sys.stdout = log
    train_loss = []
    train_loss_detail = []

    for epoch in range(args.start_epoch, args.maxepoch):
        if epoch == 0:
            print("Performing initial testing...")
            # (initial evaluation intentionally left empty for now)

        tr_avg_loss, tr_detail_loss = train(
            model=model, optimizer=optimizer, epoch=epoch,
            save_dir=join(TMP_DIR, 'epoch-%d-training-record' % epoch))
        test()

        log.flush()
        # Save checkpoint.
        # BUG FIX: save_file was computed but never passed to
        # save_checkpoint, so the per-epoch path was ignored.
        # TODO(review): confirm save_checkpoint accepts a `filename` kwarg.
        save_file = os.path.join(TMP_DIR, 'checkpoint_epoch{}.pth'.format(epoch))
        save_checkpoint({'epoch': epoch,
                         'state_dict': model.state_dict(),
                         'optimizer': optimizer.state_dict()},
                        filename=save_file)

        scheduler.step()  # advance the LR schedule once per epoch
        train_loss.append(tr_avg_loss)
        train_loss_detail += tr_detail_loss
import torchvision.datasets as dsets

from pathlib import Path
import os
from tqdm import tqdm

import preprocessing

torch.cuda.set_device(0)  # use GPU 0
EVAL_DATA_PATH = Path('./data/eval')  # validation-set root (ImageFolder layout)
TRAIN_DATA_PATH = Path('./data/train')  # training-set root (ImageFolder layout)
BATCH_SIZE = 64  # mini-batch size

from model import model

model = model.cuda()
print(model)

# Build the training/validation datasets and their dataloaders.
eval_dataset = dsets.ImageFolder(EVAL_DATA_PATH,
                                 transform=preprocessing.transform)
train_dataset = dsets.ImageFolder(TRAIN_DATA_PATH,
                                  transform=preprocessing.transform)

eval_dataloader = DataLoader(eval_dataset, batch_size=BATCH_SIZE, shuffle=True)
train_dataloader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True)

# Cross-entropy loss, optimized with SGD.
# BUG FIX: `NN` was undefined — the rest of this file refers to torch.nn
# as `nn` (see the other criterion definitions), so use that alias here too.
criterion = nn.CrossEntropyLoss()
# --- Example #3 ---
# Instantiate the network; `model` here is a factory/class and `name` is
# bound earlier in the file — presumably a model identifier (verify).
model = model(name=name)
'''
for name, child in model.named_children():
   if name in ['fc','layer4','layer3']:
       print(name + ' is unfrozen')
       for param in child.parameters():
           param.requires_grad = True
   else:
       print(name + ' is frozen')
       for param in child.parameters():
           param.requires_grad = False
'''

# Move the model to the GPU when available; `use_cuda` is set elsewhere.
if use_cuda:
    print('Using GPU')
    model.cuda()
else:
    print('Using CPU')

criterion = nn.CrossEntropyLoss()

# Observe that all parameters are being optimized
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

#best_acc = 0
# NOTE(review): `epoch` must already hold the total epoch count here — the
# loop variable rebinds it while iterating 1..epoch. Confirm it is set above.
for epoch in range(1, epoch + 1):
    train(epoch)
    validation()
# --- Example #4 ---



## Training setup

# Optimize only the parameters that are not frozen.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
num_epochs = 1

# Clear the GPU memory cache before training starts.
import torch
torch.cuda.empty_cache()

# BUG FIX: `device` was computed with a CPU fallback but then ignored by an
# unconditional model.cuda(), which crashes on CPU-only machines. Use
# model.to(device) so the fallback actually takes effect.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)


# Existing approach, adapted from engin.py.

model.train()  # switch the model to training mode
for epoch in range(num_epochs):
 
    #model.train()#これから学習しますよ
    
    for i, batch in enumerate(train_dataloader):