Example #1
def train(epoch):
    train_loader = torch.utils.data.DataLoader(
        lmdb_utils.lmdbDataset(traindb, shape=(model.module.width, model.module.height),
                       shuffle=True,
                       train=True, seen=model.module.seen),
        batch_size=batch_size, shuffle=False, **kwargs)

    logging('epoch %d : processed %d samples' % (epoch, epoch * len(train_loader.dataset)))
    model.train()
    adjust_learning_rate(optimizer, epoch)
    for batch_idx, (data, target) in enumerate(train_loader):
        if (batch_idx+1) % 70 == 0:
            sys.stdout.write('.')  # progress marker every 70 batches

        if use_cuda:
            data = data.cuda()
            #target= target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = region_loss(output, target)
        loss.backward()
        optimizer.step()
    print('')
    logging('save weights to %s/%06d.weights' % (backupdir, epoch+1))
    model.module.seen = (epoch + 1) * len(train_loader.dataset)  # total samples processed so far
    model.module.save_weights('%s/%06d.weights' % (backupdir, epoch+1))
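The snippet above defines train(epoch) but omits the loop that drives it. A minimal sketch of such a driver, assuming init_epoch (computed as in Example #2) and a max_epochs limit are defined elsewhere, could look like this:

# Hypothetical driver loop (not part of the original example): resume from the
# epoch implied by model.seen and train up to an assumed max_epochs limit.
for epoch in range(int(init_epoch), max_epochs):
    train(epoch)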
Example #2
###############
torch.manual_seed(seed)
if use_cuda:
    torch.cuda.manual_seed(seed)

model       = Darknet(cfgfile)
region_loss = model.loss

model.load_weights(weightfile)
model.print_network()
init_epoch = model.seen // nsamples  # integer division: full epochs already trained

kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
    lmdb_utils.lmdbDataset(testdb, shape=(160, 160),
                   shuffle=False,
                   transform=None,
                   train=False),
    batch_size=batch_size, shuffle=False, **kwargs)

if use_cuda:
    model = torch.nn.DataParallel(model).cuda()

optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)

def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = learning_rate * (0.1 ** (epoch // 50))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    logging('set lr=%f' % (lr))
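The schedule is a plain step decay: the base learning_rate holds for epochs 0-49, drops to a tenth for epochs 50-99, to a hundredth for 100-149, and so on. A quick check of the formula, assuming learning_rate = 1e-3 purely for illustration:

# Sanity check of the step-decay formula (learning_rate = 1e-3 is assumed here):
learning_rate = 1e-3
for epoch in (0, 49, 50, 120):
    print(epoch, learning_rate * (0.1 ** (epoch // 50)))
# -> 0.001 at epochs 0 and 49, 0.0001 at 50, roughly 1e-5 at 120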
Example #3
eps = 1e-5

###############
torch.manual_seed(seed)
if use_cuda:
    torch.cuda.manual_seed(seed)

model = Darknet(cfgfile)
region_loss = model.loss

model.load_weights(weightfile)
model.print_network()
init_epoch = model.seen // nsamples  # integer division: full epochs already trained

kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(lmdb_utils.lmdbDataset(
    testdb, shape=(160, 160), shuffle=False, transform=None, train=False),
                                          batch_size=batch_size,
                                          shuffle=False,
                                          **kwargs)

if use_cuda:
    model = torch.nn.DataParallel(model).cuda()

optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)


def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = learning_rate * (0.1**(epoch // 50))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
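Both of the last two snippets build test_loader but never consume it. A minimal evaluation sketch under the same setup, assuming region_loss takes the raw targets exactly as in Example #1 and using torch.no_grad() in place of the volatile Variables of this PyTorch era, might be:

# Hypothetical evaluation pass (not part of the original example): run the
# network in eval mode over test_loader and report the mean region loss.
def evaluate():
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            if use_cuda:
                data = data.cuda()
            output = model(data)
            total_loss += region_loss(output, target).item()
    logging('test loss: %f' % (total_loss / len(test_loader)))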