Example no. 1
0
def fit_ont_epoch(net,epoch,epoch_size,epoch_size_val,gen,genval,Epoch):
    """Train ``net`` for one epoch, run validation, and save a checkpoint.

    Args:
        net: Faster R-CNN network; wrapped in a FasterRCNNTrainer together
            with the module-level ``optimizer`` (assumed defined elsewhere
            in this file -- TODO confirm).
        epoch: zero-based index of the current epoch.
        epoch_size: number of training batches to consume from ``gen``.
        epoch_size_val: number of validation batches from ``genval``.
        gen: training generator yielding (imgs, boxes, labels) numpy batches.
        genval: validation generator with the same structure.
        Epoch: total epoch count (progress-bar display only).

    Side effects:
        Saves ``net``'s state_dict to ``./Epoch{n}-...pth`` after validation.
    """
    train_util = FasterRCNNTrainer(net, optimizer)
    total_loss = 0
    rpn_loc_loss = 0
    rpn_cls_loss = 0
    roi_loc_loss = 0
    roi_cls_loss = 0
    val_total_loss = 0  # bug fix: was misspelled 'val_toal_loss'
    with tqdm(total=epoch_size, desc=f'Train Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_size:
                break
            imgs, boxes, labels = batch[0], batch[1], batch[2]

            # torch.autograd.Variable has been a no-op wrapper since
            # PyTorch 0.4 -- build plain CUDA float tensors directly.
            with torch.no_grad():
                imgs = torch.from_numpy(imgs).float().cuda()
                boxes = [torch.from_numpy(box).float().cuda() for box in boxes]
                labels = [torch.from_numpy(label).float().cuda() for label in labels]
            losses = train_util.train_step(imgs, boxes, labels, 1)
            rpn_loc, rpn_cls, roi_loc, roi_cls, total = losses
            total_loss += total
            rpn_loc_loss += rpn_loc
            rpn_cls_loss += rpn_cls
            roi_loc_loss += roi_loc
            roi_cls_loss += roi_cls

            # running means over the iterations seen so far (iteration is 0-based,
            # so iteration + 1 is the number of completed steps)
            pbar.set_postfix(**{'total_loss'  : float(total_loss / (iteration + 1)),
                                'rpn_loc_loss': float(rpn_loc_loss / (iteration + 1)),
                                'rpn_cls_loss': float(rpn_cls_loss / (iteration + 1)),
                                'roi_loc_loss': float(roi_loc_loss / (iteration + 1)),
                                'roi_cls_loss': float(roi_cls_loss / (iteration + 1))})
            pbar.update(1)

    with tqdm(total=epoch_size_val, desc=f'Val Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(genval):
            if iteration >= epoch_size_val:
                break
            imgs, boxes, labels = batch[0], batch[1], batch[2]
            with torch.no_grad():
                imgs = torch.from_numpy(imgs).float().cuda()
                boxes = torch.from_numpy(boxes).float().cuda()
                labels = torch.from_numpy(labels).float().cuda()

                # forward-only evaluation; the original zero_grad() call here
                # was a no-op under no_grad and has been removed
                losses = train_util.forward(imgs, boxes, labels, 1)
                _, _, _, _, val_total = losses
                val_total_loss += val_total

            pbar.set_postfix(**{'total_loss': val_total_loss / (iteration + 1)})
            pbar.update(1)

    # bug fix: the epoch averages previously divided by epoch_size + 1 even
    # though the loops run exactly epoch_size / epoch_size_val iterations;
    # max(..., 1) guards against a zero-length epoch.
    train_steps = max(epoch_size, 1)
    val_steps = max(epoch_size_val, 1)
    print('\nEpoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss / train_steps, val_total_loss / val_steps))

    print('Saving state, iter:', str(epoch + 1))
    # bug fix: checkpoint the net passed to this function instead of relying
    # on an implicit global 'model'
    torch.save(net.state_dict(), './Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' % ((epoch + 1), total_loss / train_steps, val_total_loss / val_steps))
Example no. 2
0
def fit_ont_epoch(net,epoch,epoch_size,epoch_size_val,gen,genval,Epoch):
    """Train ``net`` for one epoch, run validation, and save a checkpoint.

    Per-iteration progress is reported with plain ``print`` statements.

    Args:
        net: Faster R-CNN network; wrapped in a FasterRCNNTrainer together
            with the module-level ``optimizer`` (assumed defined elsewhere
            in this file -- TODO confirm).
        epoch: zero-based index of the current epoch.
        epoch_size: number of training batches to consume from ``gen``.
        epoch_size_val: number of validation batches from ``genval``.
        gen: training generator yielding (imgs, boxes, labels) numpy batches.
        genval: validation generator with the same structure.
        Epoch: total epoch count (display only).

    Side effects:
        Saves ``net``'s state_dict to ``logs/Epoch{n}-...pth``.
    """
    train_util = FasterRCNNTrainer(net, optimizer)
    total_loss = 0
    rpn_loc_loss = 0
    rpn_cls_loss = 0
    roi_loc_loss = 0
    roi_cls_loss = 0
    val_total_loss = 0  # bug fix: was misspelled 'val_toal_loss'
    for iteration, batch in enumerate(gen):
        if iteration >= epoch_size:
            break
        start_time = time.time()
        imgs, boxes, labels = batch[0], batch[1], batch[2]

        # torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4;
        # build plain CUDA float tensors directly.
        with torch.no_grad():
            imgs = torch.from_numpy(imgs).float().cuda()
            boxes = [torch.from_numpy(box).float().cuda() for box in boxes]
            labels = [torch.from_numpy(label).float().cuda() for label in labels]
        losses = train_util.train_step(imgs, boxes, labels, 1)
        rpn_loc, rpn_cls, roi_loc, roi_cls, total = losses
        total_loss += total
        rpn_loc_loss += rpn_loc
        rpn_cls_loss += rpn_cls
        roi_loc_loss += roi_loc
        roi_cls_loss += roi_cls

        waste_time = time.time() - start_time
        print('\nEpoch:'+ str(epoch+1) + '/' + str(Epoch))
        # running means over the (iteration + 1) completed steps
        print('iter:' + str(iteration) + '/' + str(epoch_size) + ' || total_loss: %.4f|| rpn_loc_loss: %.4f || rpn_cls_loss: %.4f || roi_loc_loss: %.4f || roi_cls_loss: %.4f || %.4fs/step' \
            % (total_loss/(iteration+1), rpn_loc_loss/(iteration+1),rpn_cls_loss/(iteration+1),roi_loc_loss/(iteration+1),roi_cls_loss/(iteration+1),waste_time))

    print('Start Validation')
    for iteration, batch in enumerate(genval):
        if iteration >= epoch_size_val:
            break
        imgs, boxes, labels = batch[0], batch[1], batch[2]
        with torch.no_grad():
            imgs = torch.from_numpy(imgs).float().cuda()
            boxes = torch.from_numpy(boxes).float().cuda()
            labels = torch.from_numpy(labels).float().cuda()

            # forward-only evaluation; the original zero_grad() call here
            # was a no-op under no_grad and has been removed
            losses = train_util.forward(imgs, boxes, labels, 1)
            _, _, _, _, val_total = losses
            val_total_loss += val_total
    print('Finish Validation')

    # bug fix: the epoch averages previously divided by epoch_size + 1 even
    # though the loops run exactly epoch_size / epoch_size_val iterations;
    # max(..., 1) guards against a zero-length epoch.
    train_steps = max(epoch_size, 1)
    val_steps = max(epoch_size_val, 1)
    print('\nEpoch:'+ str(epoch+1) + '/' + str(Epoch))
    print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss / train_steps, val_total_loss / val_steps))

    print('Saving state, iter:', str(epoch+1))
    # bug fix: checkpoint the net passed to this function instead of relying
    # on an implicit global 'model'
    torch.save(net.state_dict(), 'logs/Epoch%d-Total_Loss%.4f-Val_Loss%.4f.pth' % ((epoch + 1), total_loss / train_steps, val_total_loss / val_steps))
Example no. 3
0
from torch.utils import data as data_
from tqdm import tqdm
from model.faster_rcnn_vgg16 import decom_vgg16, VGG16RoIHead
import torch
from model.region_proposal_network import RegionProposalNetwork
import numpy as np
from model.faster_rcnn_vgg16 import FasterRCNNVGG16
from trainer import FasterRCNNTrainer

# Smoke test: run a single sample through FasterRCNNTrainer.forward and
# print the resulting loss tuple.
data_set = TrainDataset()
#data_loader = data_.DataLoader(data_set, batch_size=1, shuffle=False)

# idiomatic indexing instead of calling __getitem__ directly
img, bbox, label, scale = data_set[0]
model = FasterRCNNVGG16().cuda()
trainer = FasterRCNNTrainer(model)
loss = trainer.forward(
    torch.from_numpy(img[None, :]).cuda(), bbox, label, scale)
print(loss)
"""
roi_locs, roi_scores, rpn_locs, rpn_scores = model.forward(torch.from_numpy(img[None, :]).cuda())
print(roi_locs.shape)
print(roi_scores.shape)
print(rpn_locs.shape)
print(rpn_scores.shape)
"""
"""
extractor, classifier = decom_vgg16()
feature_map = extractor.cuda()(torch.from_numpy(img[None, :]).cuda())
print(img.shape)#3, 600, 800
print(feature_map.shape)#1, 512, 37, 50
"""
# bug fix: the second triple-quoted block above was never closed, which made
# the file a syntax error; it is now terminated.
Example no. 4
0
def _checkpoint_state(epoch, best_map, trainer, optimizer):
    """Build the serializable checkpoint dict shared by both save paths."""
    return {
        "epoch": epoch + 1,
        "best_map": best_map,
        "model_state": trainer.faster_rcnn.state_dict(),
        "optimizer_state": optimizer.state_dict(),
    }


def train(**kwargs):
    """Train a Faster R-CNN VGG16 model end to end.

    Splits the dataset 80/20 into train/validation, trains with SGD plus a
    step LR schedule, logs the moving-average loss to TensorBoard, saves the
    best-mAP checkpoint to ``opt.model_para`` and the final state to
    ``last_epoch.pkl``.

    Args:
        **kwargs: option overrides forwarded to ``opt._parse``.
    """
    opt._parse(kwargs)

    image_folder_path = 'DataSets/images/'
    cvs_file_path = 'DataSets/labels.csv'

    dataset = DataSets(cvs_file_path, image_folder_path)
    data_size = len(dataset)
    indices = list(range(data_size))
    # hold out 20% of the samples for validation, with a fixed seed so the
    # split is reproducible across runs
    split = int(np.floor(data_size * 0.2))
    np.random.seed(42)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = torch.utils.data.SubsetRandomSampler(train_indices)
    valid_sampler = torch.utils.data.SubsetRandomSampler(val_indices)

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=1,
                                               sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(dataset,
                                             batch_size=1,
                                             sampler=valid_sampler)
    print('load data')

    avg_loss = AverageValueMeter()
    ma20_loss = MovingAverageValueMeter(windowsize=20)
    faster_rcnn = FasterRCNNVGG16()
    print('model construct completed')
    start_epoch = 0
    best_map = -100
    trainer = FasterRCNNTrainer(faster_rcnn).cuda()
    optimizer = optim.SGD(trainer.faster_rcnn.parameters(),
                          lr=opt.lr,
                          momentum=0.9)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

    if opt.load_path:
        print('load pretrained model from %s' % opt.load_path)
        checkpoint = torch.load(opt.load_path)
        start_epoch = checkpoint['epoch']
        best_map = checkpoint['best_map']
        trainer.faster_rcnn.load_state_dict(checkpoint['model_state'])
        optimizer.load_state_dict(checkpoint['optimizer_state'])
        # bug fix: referenced undefined 'args.resume' (NameError); the
        # checkpoint path lives in opt.load_path
        print("> Loaded checkpoint '{}' (epoch {})".format(
            opt.load_path, start_epoch))

    #trainer.vis.text(dataset.db.label_names, win='labels')

    # set up TensorBoard for visualization
    writer = SummaryWriter('runs/' + opt.log_root)

    for epoch in range(start_epoch, opt.epoch):
        trainer.train(mode=True)  # must be in train mode during training
        for ii, (img, _, _, bbox_, label_, scale,
                 _) in enumerate(train_loader):
            scale = at.scalar(scale)
            img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
            optimizer.zero_grad()
            loss = trainer.forward(img, bbox, label, scale)
            loss.total_loss.backward()
            optimizer.step()
            loss_value = loss.total_loss.cpu().data.numpy()
            avg_loss.add(float(loss_value))
            ma20_loss.add(float(loss_value))
            print(
                '[epoch:{}/{}]  [batch:{}/{}]  [sample_loss:{:.4f}] [avg_loss:{:.4f}]  [ma20_loss:{:.4f}]'
                .format(epoch, opt.epoch, ii + 1, len(train_loader),
                        loss.total_loss.data,
                        avg_loss.value()[0],
                        ma20_loss.value()[0]))

            if (ii + 1) % opt.plot_every == 0:
                niter = epoch * len(train_loader) + ii
                writer.add_scalar('Train/Loss', ma20_loss.value()[0], niter)

        # NOTE(review): 'eval' here is a project evaluation helper that
        # shadows the builtin -- confirm it is imported in this module
        eval_result = eval(val_loader, faster_rcnn, test_num=opt.test_num)
        print(eval_result['map'])

        if eval_result['map'] > best_map:
            best_map = eval_result['map']
            torch.save(_checkpoint_state(epoch, best_map, trainer, optimizer),
                       opt.model_para)
        scheduler.step()
    torch.save(_checkpoint_state(epoch, best_map, trainer, optimizer),
               'last_epoch.pkl')
    writer.close()