Example #1
0
def train(args):
    """Train the YOLO model for ``args.epochs`` epochs, checkpoint the best
    run, then export a CPU-loadable copy of the best weights.

    Args:
        args: argparse-style namespace with at least ``batch_size``,
            ``use_cuda``, ``lr`` and ``epochs``.
    """
    # NOTE(review): the original format string had a single placeholder but
    # its text mentioned two values; report just the batch size.
    print('Batch size is {}'.format(args.batch_size))
    # VGG16 backbone (pretrained) provides the convolutional features.
    vgg = models.vgg16(True)
    model = YOLO(vgg.features)
    if args.use_cuda:
        # DataParallel prefixes every parameter key with 'module.'.
        model = torch.nn.DataParallel(model)
        model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    best_loss = 1e+30  # "worse than anything" sentinel for the first epoch

    for epoch in range(1, args.epochs + 1):
        epoch_loss = train_epoch(epoch, model, optimizer, args)

        # Smoke-test inference on a fixed image after every epoch.
        upperleft, bottomright, classes, confs = test_epoch(
            model, jpg='../data/1.jpg')
        is_best = epoch_loss < best_loss
        best_loss = min(epoch_loss, best_loss)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best)

    # Reload the best checkpoint and export weights usable without CUDA.
    checkpoint = torch.load('./model_best.pth.tar')
    state_dict = checkpoint['state_dict']

    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # Fix: strip the 'module.' prefix only when it is present.  The
        # original unconditional k[7:] corrupted every key whenever
        # use_cuda was False (no DataParallel wrapper, hence no prefix).
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v

    # NOTE(review): when use_cuda is True, `model` is still the DataParallel
    # wrapper here, which expects the 'module.' prefix — loading the stripped
    # dict into it may fail; confirm whether `model.module` was intended.
    model.load_state_dict(new_state_dict)
    model.cpu()

    torch.save(model.state_dict(), 'model_cpu.pth.tar')
Example #2
0
import torch
import numpy as np

# MARK: - load data
# Download COCO only when no local image directory is configured.
# (Fix: `False if tc.imageDir else True` replaced by the idiomatic `not`.)
cocoDataset = COCODataset(tc.imageDir,
                          tc.annFile,
                          fromInternet=not tc.imageDir)
dataLoader = DataLoader(cocoDataset, batch_size=tc.batchSize, shuffle=True)

# MARK: - train
model = YOLO().to(device)
if tc.preTrainedWeight:
    # Resume from a checkpoint; restore the warm-up batch counter so the
    # learning-rate warm-up does not restart from scratch.
    model.load_state_dict(torch.load(tc.preTrainedWeight, map_location=device))
    model.warmUpBatch = tc.warmUpBatches

optimizer = SGD(model.parameters(), lr=1e-3)
prevBestLoss = np.inf  # best validation loss seen so far
batches = len(dataLoader)
logger = MetricsLogger()

model.train()  # enable training mode (dropout / batch-norm updates)
for epoch in range(tc.epochs):
    losses = []
    for batch, (x, y, z) in enumerate(dataLoader):
        # Move the whole batch to the training device.
        x, y, z = x.to(device), y.to(device), z.to(device)

        # The model returns the loss directly when given targets.
        loss = model(x, y, z)
        losses.append(loss.cpu().item())

        # NOTE(review): no backward()/optimizer.step() is visible in this
        # excerpt — presumably performed further down; confirm.
        metrics = model.metrics
        logger.step(metrics, epoch, batch)
Example #3
0
        yield imgs, tars


# Prefer the GPU when one is available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# --- hyper-parameters -------------------------------------------------
# The nine anchor boxes, given as (width, height) pairs in pixels.
anchors = np.array([[10, 13], [16, 30], [33, 23],
                    [30, 61], [62, 45], [59, 119],
                    [116, 90], [156, 198], [373, 326]])
# Which anchor indices belong to each of the three detection scales.
anchor_mask = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
image_size = (416, 416)  # default network input resolution
# Normalize the anchors from pixels to fractions of the input image.
anchors = anchors / np.asarray(image_size).reshape(1, 2)
num_classes = 80

# --- model / optimizer ------------------------------------------------
net = YOLO()
net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
loss_fn = yolo_loss

# Short smoke-training run over synthetic batches.
for epoch in range(10):
    num_of_batch = 20
    running_loss = 0.
    for imgs, tars in create_fake_data(image_size, num_of_batch=num_of_batch):
        imgs = imgs.to(device)
        # Clear stale gradients before this batch's backward pass.
        optimizer.zero_grad()
        feats = net(imgs)
        batch_loss = loss_fn(feats, tars, anchors, anchor_mask, device,
                             image_size)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
    # Report the mean batch loss for the epoch.
    print("epoch {0}, loss {1}".format(epoch, running_loss / num_of_batch))
Example #4
0

# DataLoaders for training and validation.
# NOTE(review): shuffle=False on the *training* loader is unusual — confirm
# this is intentional (e.g. for reproducible debugging).
train_dataloader = DataLoader(train_data,
                              batch_size=bs,
                              shuffle=False,
                              num_workers=0)
valid_dataloader = DataLoader(valid_data,
                              batch_size=bs,
                              shuffle=False,
                              num_workers=0)

# show_boxes(image, boxes)
yolo = YOLO(4, 2)  # presumably (grid/classes, boxes per cell) — TODO confirm
loss_fn = YOLOLoss(classes, 2)

# Plain SGD with momentum; `optim` shadows the torch.optim module name.
optim = torch.optim.SGD(yolo.parameters(), lr=0.001, momentum=0.9)

# checkpoint_path = "checkpoints/test2.checkpoint"
# yolo.load_state_dict(torch.load(checkpoint_path))
# print("loading checkpoint")

for epoch in range(epochs):
    print("epoch: ", epoch)

    # validation set eval
    # Gradients are not needed while evaluating, so disable autograd.
    with torch.no_grad():
        valid_loss = 0.
        for i_batch, sample_batched in enumerate(valid_dataloader):

            output = yolo(sample_batched['image'].float())