# Imports needed by this script; YOLO, train_epoch, test_epoch and
# save_checkpoint are assumed to be defined in the project's own modules.
from collections import OrderedDict

import torch
import torch.optim as optim
from torchvision import models


def train(args):
    print('Dataset of instance(s) and batch size is {}'.format(args.batch_size))

    # Build the detector on top of a pretrained VGG16 feature extractor.
    vgg = models.vgg16(pretrained=True)
    model = YOLO(vgg.features)
    if args.use_cuda:
        model = torch.nn.DataParallel(model)
        model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    best = 1e+30
    for epoch in range(1, args.epochs + 1):
        l = train_epoch(epoch, model, optimizer, args)
        # Quick qualitative check on a sample image after every epoch.
        upperleft, bottomright, classes, confs = test_epoch(
            model, jpg='../data/1.jpg')
        is_best = l < best
        best = min(l, best)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best)

    # Reload the best checkpoint and strip the 'module.' prefix that
    # DataParallel prepends to parameter names when saving.
    checkpoint = torch.load('./model_best.pth.tar')
    state_dict = checkpoint['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:] if k.startswith('module.') else k  # remove 'module.'
        new_state_dict[name] = v

    # Unwrap DataParallel (if used) so the cleaned key names match the model.
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    model.load_state_dict(new_state_dict)

    # Export a CPU copy of the weights.
    model.cpu()
    torch.save(model.state_dict(), 'model_cpu.pth.tar')
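A minimal sketch of how train(args) might be invoked from the command line. The argument names (batch_size, lr, epochs, use_cuda) mirror the attributes the function reads; the defaults and flag spellings here are illustrative assumptions, not values taken from the source.

# Hypothetical entry point; adjust defaults to match the project's config.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Train YOLO on a VGG16 backbone')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=16)
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--use-cuda', dest='use_cuda', action='store_true')
    args = parser.parse_args()
    train(args)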
from config import device, tc
from model import YOLO
from utils import *
import torch
import numpy as np
# DataLoader, SGD, COCODataset and MetricsLogger are assumed to be provided by
# the project's utils module (via the wildcard import above) or by torch.

# MARK: - load data
cocoDataset = COCODataset(tc.imageDir, tc.annFile,
                          fromInternet=False if tc.imageDir else True)
dataLoader = DataLoader(cocoDataset, batch_size=tc.batchSize, shuffle=True)

# MARK: - train
model = YOLO().to(device)
if tc.preTrainedWeight:
    model.load_state_dict(torch.load(tc.preTrainedWeight, map_location=device))
model.warmUpBatch = tc.warmUpBatches

optimizer = SGD(model.parameters(), lr=1e-3)

prevBestLoss = np.inf
batches = len(dataLoader)
logger = MetricsLogger()

model.train()
for epoch in range(tc.epochs):
    losses = []
    for batch, (x, y, z) in enumerate(dataLoader):
        x, y, z = x.to(device), y.to(device), z.to(device)
        loss = model(x, y, z)
        losses.append(loss.cpu().item())