# Example #1 (score: 0)
def run(pretrained_model):
    """Fine-tune a pretrained RACNN on CUB-200 and checkpoint periodically.

    Args:
        pretrained_model: path to a saved state-dict file used to
            initialize the network.

    Side effects: writes crop-visualization JPEGs and model/optimizer
    checkpoints under build/.
    """
    log(f' :: Start training with {pretrained_model}')
    net = RACNN(num_classes=200).cuda()
    net.load_state_dict(torch.load(pretrained_model))
    cudnn.benchmark = True

    # Two parameter groups trained alternately with separate optimizers:
    # backbone + classifier heads vs. the two attention-proposal networks.
    cls_params = list(net.b1.parameters()) + list(net.b2.parameters()) + list(net.b3.parameters()) + \
        list(net.classifier1.parameters()) + list(net.classifier2.parameters()) + list(net.classifier3.parameters())
    apn_params = list(net.apn1.parameters()) + list(net.apn2.parameters())

    cls_opt = optim.SGD(cls_params, lr=0.001, momentum=0.9)
    apn_opt = optim.SGD(apn_params, lr=0.001, momentum=0.9)

    trainset = CUB200_loader('external/CUB_200_2011', split='train')
    testset = CUB200_loader('external/CUB_200_2011', split='test')
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=4,
                                              shuffle=True,
                                              collate_fn=trainset.CUB_collate,
                                              num_workers=4)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=8,
                                             shuffle=False,
                                             collate_fn=testset.CUB_collate,
                                             num_workers=4)
    sample = random_sample(testloader)

    for epoch in range(50):
        cls_loss = train(net, trainloader, cls_opt, epoch, 'backbone')
        rank_loss = train(net, trainloader, apn_opt, epoch, 'apn')
        test(net, testloader)

        # visualize cropped inputs at the two attention scales
        _, _, _, resized = net(sample.unsqueeze(0))
        x1, x2 = resized[0].data, resized[1].data
        save_img(
            x1,
            path=f'build/.cache/epoch_{epoch}@2x.jpg',
            annotation=f'cls_loss = {cls_loss:.7f}, rank_loss = {rank_loss:.7f}'
        )
        save_img(
            x2,
            path=f'build/.cache/epoch_{epoch}@4x.jpg',
            annotation=f'cls_loss = {cls_loss:.7f}, rank_loss = {rank_loss:.7f}'
        )

        # save model every 10 epochs (skipping epoch 0)
        if epoch % 10 == 0 and epoch != 0:
            stamp = f'e{epoch}{int(time.time())}'
            # BUG FIX: state_dict is a method — it must be *called*. The
            # original passed the bound method object to torch.save, so the
            # checkpoint did not contain the weights at all.
            torch.save(net.state_dict(),
                       f'build/racnn_mobilenetv2_cub200-e{epoch}s{stamp}.pt')
            log(f' :: Saved model dict as:\tbuild/racnn_mobilenetv2_cub200-e{epoch}s{stamp}.pt'
                )
            # BUG FIX: same method-vs-call bug for both optimizer checkpoints.
            torch.save(cls_opt.state_dict(), f'build/cls_optimizer-s{stamp}.pt')
            torch.save(apn_opt.state_dict(), f'build/apn_optimizer-s{stamp}.pt')
# Example #2 (score: 0)
def run():
    """Train a RACNN on Stanford Dogs starting from pretrained weights.

    Each epoch alternates a backbone/classifier pass and an APN pass,
    evaluates on the test set, dumps crop visualizations, and writes
    checkpoints every 5 epochs.
    """
    log(f'forge started')
    net = RACNN(num_classes=120).cuda()

    net.load_state_dict(torch.load('build/new_racnn_pretrained-1618222083.pt'))

    # To resume from a full training checkpoint instead:
    # net.load_state_dict(torch.load('build/racnn_mobilenetv2_cub200-e39se391618150738.pt'))
    # net.eval()

    cudnn.benchmark = True

    # Backbone + classifier heads form one optimizer group, the two
    # attention-proposal networks form the other.
    backbone_modules = (net.b1, net.b2, net.b3,
                        net.classifier1, net.classifier2, net.classifier3)
    cls_params = [p for m in backbone_modules for p in m.parameters()]
    apn_params = [p for m in (net.apn1, net.apn2) for p in m.parameters()]

    cls_opt = optim.SGD(cls_params, lr=0.001, momentum=0.9)
    apn_opt = optim.SGD(apn_params, lr=0.001, momentum=0.9)

    trainset = StanfordDogLoader('external/StanfordDog', split='train')
    testset = StanfordDogLoader('external/StanfordDog', split='test')
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=4, shuffle=True,
        collate_fn=trainset.CUB_collate, num_workers=4)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=8, shuffle=False,
        collate_fn=testset.CUB_collate, num_workers=4)
    sample = random_sample(testloader)

    # To also restore optimizer state when resuming:
    # cls_opt.load_state_dict(torch.load('build/cls_optimizer-se391618150738.pt'))
    # apn_opt.load_state_dict(torch.load('build/apn_optimizer-se391618150738.pt'))
    # test(net, testloader)
    # print('loaded')

    for epoch in range(50):
        cls_loss = train(net, trainloader, cls_opt, epoch, 'backbone')
        rank_loss = train(net, trainloader, apn_opt, epoch, 'apn')
        test(net, testloader)

        # Visualize the crops produced at both attention scales.
        _, _, _, resized = net(sample.unsqueeze(0))
        caption = f'cls_loss = {cls_loss:.7f}, rank_loss = {rank_loss:.7f}'
        save_img(resized[0].data, path=f'build/.cache/epoch_{epoch}@2x.jpg', annotation=caption)
        save_img(resized[1].data, path=f'build/.cache/epoch_{epoch}@4x.jpg', annotation=caption)

        # Checkpoint on epochs 4, 9, 14, ...
        if epoch % 5 == 4:
            stamp = f'e{epoch}{int(time.time())}'
            torch.save(net.state_dict(), f'build/new_racnn_mobilenetv2_-e{epoch}_{stamp}.pt')
            torch.save(net, f'build/main_net-e{epoch}_{stamp}.pt')
            log(f' :: Saved model dict')
            torch.save(cls_opt.state_dict(), f'build/new_cls_optimizer-s{stamp}.pt')
            torch.save(apn_opt.state_dict(), f'build/new_apn_optimizer-s{stamp}.pt')
# Example #3 (score: 0)
def train(net, dataloader, optimizer, epoch, _type):
    """Run one training epoch over ``dataloader`` in the given loss mode.

    Args:
        net: model exposing ``mode(_type)`` (loss switch) and
            ``echo(inputs, targets, optimizer)`` (one optimization step
            returning the scalar loss).
        dataloader: iterable of ``(inputs, targets)`` batches; must support
            ``len()`` (used only for logging).
        optimizer: optimizer forwarded to ``net.echo``.
        epoch: epoch index, used only in log messages.
        _type: loss to optimize — ``'apn'`` or ``'backbone'``.

    Returns:
        The most recently logged 20-step average loss, or 0.0 when the
        loader yielded 20 or fewer batches.
    """
    assert _type in ['apn', 'backbone']
    losses = 0
    # BUG FIX: avg_loss was unbound (UnboundLocalError on return) whenever
    # the dataloader produced 20 or fewer batches.
    avg_loss = 0.0
    net.mode(_type), log(f' :: Switch to {_type}')  # switch loss type
    for step, (inputs, targets) in enumerate(dataloader, 0):
        loss = net.echo(inputs, targets, optimizer)
        losses += loss

        # Log a rolling 20-step average, then reset the accumulator.
        if step % 20 == 0 and step != 0:
            avg_loss = losses/20
            log(f':: loss @step({step:2d}/{len(dataloader)})-epoch{epoch}: {loss:.10f}\tavg_loss_20: {avg_loss:.10f}')
            losses = 0

    return avg_loss
# Example #4 (score: 0)
def test(net, dataloader):
    """Evaluate ``net``, logging top-1/top-5 accuracy for each of its
    three classifier heads.

    Evaluation is capped at 201 batches to bound cost. BUG FIX: the
    original only logged results *after* batch 200 and returned silently
    on shorter test sets; it also computed the denominator as
    ``(step+1) * batch_size``, which over-counts when the last batch is
    partial. We now count actual samples and always log a final summary.
    """
    log(' :: Testing on test set ...')
    correct_summary = {'clsf-0': {'top-1': 0, 'top-5': 0}, 'clsf-1': {'top-1': 0, 'top-5': 0}, 'clsf-2': {'top-1': 0, 'top-5': 0}}
    total = 0  # number of samples actually evaluated

    def _log_summary(step):
        # One accuracy line per classifier head and per top-k metric.
        for clsf, _summary in correct_summary.items():
            for topk, hits in _summary.items():
                log(f'\tAccuracy {clsf}@{topk} ({step}/{len(dataloader)}) = {hits/total:.5%}')

    for step, (inputs, labels) in enumerate(dataloader, 0):
        inputs, labels = Variable(inputs).cuda(), Variable(labels).cuda()

        with torch.no_grad():
            outputs, _, _, _ = net(inputs)
            total += int(inputs.shape[0])
            for idx, logits in enumerate(outputs):
                # topk returns indices of shape (batch, k); broadcasting the
                # comparison against the (batch, 1) label column counts hits.
                # (max((1, 1)) / max((1, 5)) in the original were just 1 and 5.)
                correct_summary[f'clsf-{idx}']['top-1'] += torch.eq(logits.topk(1, 1, True, True)[1], labels.view(-1, 1)).sum().float().item()  # top-1
                correct_summary[f'clsf-{idx}']['top-5'] += torch.eq(logits.topk(5, 1, True, True)[1], labels.view(-1, 1)).sum().float().item()  # top-5

        if step > 200:  # cap evaluation cost on large test sets
            _log_summary(step)
            return

    # Also log when the loader had 201 batches or fewer (previously silent).
    if total:
        _log_summary(step)