Example #1
def validate(model, valloader, n_class):
    losses = AverageMeter()
    model.eval()
    gts, preds = [], []
    for i, (images, labels) in enumerate(valloader):
        images = Variable(images.cuda())
        labels = Variable(labels.cuda())
        outputs = model(images)
        # some models return a (logits, auxiliary) tuple; keep the segmentation logits
        if isinstance(outputs, tuple):
            outputs = outputs[0]

        loss = cross_entropy2d(outputs, labels)
        losses.update(loss.data[0], images.size(0))

        gt = labels.data.cpu().numpy()
        pred = outputs.data.max(1)[1].cpu().numpy()

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)
    score = scores(gts, preds, n_class=n_class)

    return losses.avg, score
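
Note: AverageMeter is defined elsewhere in this repo and is not shown above. A minimal sketch of the conventional running-average helper, matching the update(val, n) / avg usage in validate (the internals here are an assumption):

class AverageMeter(object):
    """Tracks the most recent value and a running average."""
    def __init__(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # weighted sum of all values
        self.count = 0    # total number of samples seen
        self.avg = 0.0    # running mean, sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count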
Example #2
def train(args):
    # Setup train DataLoader
    trainloader = CCFLoader(args.traindir, split=args.split,
                            is_transform=True, img_size=(args.img_rows, args.img_cols))
    n_classes = trainloader.n_classes
    TrainDataLoader = data.DataLoader(
        trainloader, batch_size=args.batch_size, num_workers=4, shuffle=True)

    # Setup validate DataLoader
    valloader = CCFLoader(args.traindir, split='val', is_transform=True, img_size=(
        args.img_rows, args.img_cols))
    VALDataLoader = data.DataLoader(
        valloader, batch_size=4, num_workers=4, shuffle=False)

    # Setup visdom for visualization
    vis = visdom.Visdom()
    assert vis.check_connection()

    loss_window = vis.line(X=np.zeros((1,)),
                           Y=np.zeros((1,)),
                           opts=dict(xlabel='minibatches',
                                     ylabel='Loss',
                                     title=args.arch+' Training Loss',
                                     legend=['Loss']))
    valacc_window = vis.line(X=np.zeros((1,)),
                             Y=np.zeros((1,)),
                             opts=dict(xlabel='minibatches',
                                       ylabel='ACC',
                                       title='Val ACC',
                                       legend=['ACC']))

    # Setup model
    if args.snapshot is None:
        model = get_model(args.arch, n_classes)
        model = DataParallel(model.cuda(args.gpu[0]), device_ids=args.gpu)
        start_epoch = 0
    else:
        model = get_model(args.arch, n_classes)
        state_dict = torch.load(args.snapshot).state_dict()
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for key, value in list(state_dict.items()):
            original_key = key[7:]  # strip the 'module.' prefix added by DataParallel
            new_state_dict[original_key] = value
        model.load_state_dict(new_state_dict)
        model = DataParallel(model.cuda(), device_ids=[i for i in range(len(args.gpu))])
        start_epoch = int(os.path.basename(args.snapshot).split('.')[0])

    optimizer = torch.optim.SGD(model.parameters(), lr=args.l_rate, momentum=0.99, weight_decay=5e-4)

    print(model)

    # Start training
    for epoch in range(args.n_epoch):
        adjust_learning_rate(optimizer, args.l_rate, epoch, args.step)
        if epoch < start_epoch:
            continue
        print("Epoch [%d/%d] learning rate: %f" % (epoch+1, args.n_epoch, optimizer.param_groups[0]['lr']))
        for i, (images, labels) in enumerate(TrainDataLoader):
            if torch.cuda.is_available():
                images = Variable(images.cuda(args.gpu[0]))
                labels = Variable(labels.cuda(args.gpu[0]))
            else:
                images = Variable(images)
                labels = Variable(labels)

            iteration = len(TrainDataLoader) * epoch + i
            #poly_lr_scheduler(optimizer, args.l_rate, iteration)

            model.train()
            optimizer.zero_grad()
            outputs = model(images)
            if isinstance(outputs, tuple):
                # auxiliary classification head: weighted sum of the two losses
                loss = cross_entropy2d(outputs[0], labels, weights_per_class) + args.clsloss_weight * bin_clsloss(outputs[1], labels)
            else:
                loss = cross_entropy2d(outputs, labels, weights_per_class)

            loss.backward()
            optimizer.step()

            vis.line(
                X=torch.ones((1, 1)).cpu() * iteration,
                Y=torch.Tensor([loss.data]).unsqueeze(0).cpu(),
                win=loss_window,
                update='append')

        print("Epoch [%d/%d] loss: %f" % (epoch+1, args.n_epoch, loss))

        # validation
        val_loss, score = validate(model, VALDataLoader, n_classes)
        for i in range(n_classes):
            print(i, score['Class Acc'][i])
        vis.line(
            X=torch.ones((1, 1)).cpu()*(epoch+1),
            Y=torch.ones((1, 1)).cpu()*score['Overall Acc'],
            win=valacc_window,
            update='append')

        if not os.path.exists("snapshot/{}".format(args.arch)):
            os.makedirs("snapshot/{}".format(args.arch))
        torch.save(model, "snapshot/{}/{}.pkl".format(args.arch, epoch+1))
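
Note: cross_entropy2d is a repo-local helper, called above both with and without per-class weights. A minimal sketch of the usual 2D cross-entropy wrapper found in semantic-segmentation codebases, written against a recent PyTorch (the resize step and the ignore_index value are assumptions):

import torch.nn.functional as F

def cross_entropy2d(input, target, weight=None, ignore_index=250):
    # input: [N, C, H, W] logits; target: [N, H, W] integer class map
    n, c, h, w = input.size()
    nt, ht, wt = target.size()
    # upsample the logits if the model output is smaller than the label map
    if h != ht or w != wt:
        input = F.interpolate(input, size=(ht, wt), mode='bilinear', align_corners=True)
    return F.cross_entropy(input, target, weight=weight, ignore_index=ignore_index)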
Example #3
def main():
    print('RUNDIR: {}'.format(args.logdir))
    sys.stdout.flush()
    logger = function.get_logger(args.logdir)
    logger.info('test')  # write in log file
    running_metrics_val = function.runningScoreSeg(args.n_class)
    val_loss_meter = function.averageMeter()
    time_meter = function.averageMeter()

    print(len(valloader))
    start_ts = time.time()  # return current time stamp
    model.eval()
    with torch.no_grad():
        for i_val, (leftimgval, rightimgval, labelval, disp_true,
                    L_name) in tqdm(enumerate(valloader)):
            # convert to FloatTensor and move to the GPU when available
            imgL = torch.FloatTensor(leftimgval.numpy())
            imgR = torch.FloatTensor(rightimgval.numpy())
            if args.cuda:
                imgL, imgR = imgL.cuda(), imgR.cuda()

            imgL, imgR = Variable(imgL), Variable(imgR)
            output = model(imgL, imgR)  # [batch_size, n_class, height, width] segmentation logits
            pred_seg = output.data.cpu().numpy()

            # FCN segmentation: render the per-pixel argmax as a color map
            N, _, h, w = pred_seg.shape
            pred_segmap = pred_seg.transpose(0, 2, 3, 1).reshape(
                -1, args.n_class).argmax(axis=1).reshape(N, h, w)
            img = drawseg.direct_render(pred_segmap, args.n_class)
            skimage.io.imsave(args.saveseg + (L_name[0].split('/')[-1]),
                              img[0])

            # segmentation mIoU
            score = torch.from_numpy(pred_seg).cuda()
            lossval = cross_entropy2d(score, labelval.cuda())  # mean pixelwise loss over the batch
            pred = score.data.max(1)[1].cpu().numpy()  # [batch_size, height, width]
            gt = labelval.data.cpu().numpy()  # [batch_size, height, width]
            running_metrics_val.update(gt=gt, pred=pred)
            val_loss_meter.update(lossval.item())

            torch.cuda.empty_cache()
        logger.info(" val_loss: %.4f" % (val_loss_meter.avg))

        print("val_loss: %.4f" % (val_loss_meter.avg))
        #"""
        # output scores
        score, class_iou = running_metrics_val.get_scores()
        for k, v in score.items():
            print(k, v)
            sys.stdout.flush()
            logger.info('{}: {}'.format(k, v))

        for k, v in class_iou.items():
            print(k, v)
            logger.info('{}: {}'.format(k, v))
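
Note: running_metrics_val accumulates a confusion matrix across validation batches. A minimal sketch of such a runningScoreSeg class (the class and method names follow the calls above; the key strings in the returned dict are illustrative, and each repo uses its own literals, e.g. Example #4 below reads score["Mean IoU : \t"]):

import numpy as np

class runningScoreSeg(object):
    """Confusion-matrix based segmentation metrics: overall accuracy and per-class/mean IoU."""
    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    def update(self, gt, pred):
        # gt, pred: [batch_size, height, width] integer label maps
        for lt, lp in zip(gt, pred):
            mask = (lt >= 0) & (lt < self.n_classes)
            hist = np.bincount(
                self.n_classes * lt[mask].astype(int) + lp[mask],
                minlength=self.n_classes ** 2,
            ).reshape(self.n_classes, self.n_classes)
            self.confusion_matrix += hist

    def get_scores(self):
        hist = self.confusion_matrix
        acc = np.diag(hist).sum() / hist.sum()
        iou = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        score = {"Overall Acc": acc, "Mean IoU": np.nanmean(iou)}
        class_iou = dict(zip(range(self.n_classes), iou))
        return score, class_iou

    def reset(self):
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))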
Example #4
def main():
    writer = SummaryWriter(log_dir=args.logdir)
    print('RUNDIR: {}'.format(args.logdir))
    sys.stdout.flush()
    #shutil.copy(args.config, args.logdir)  # copy config file to path of logdir
    logger = function.get_logger(args.logdir)
    logger.info('Let the games begin')  # write in log file
    running_metrics_val = function.runningScoreSeg(args.n_class)
    val_loss_meter = function.averageMeter()
    time_meter = function.averageMeter()
    best_iou = -100.0
    i = 0
    flag = True
    train_iters = 200000
    print_iter = 150
    val_iters = 200
    save_iters = 10000
    while i <= train_iters and flag:
        print(len(trainloader))
        for (leftimg, labels) in trainloader:
            start_ts = time.time()  # return current time stamp
            model.train()  # set model to training mode
            imgL = Variable(leftimg).cuda()
            label = Variable(labels).cuda()  # [batch_size, height, width]

            optimizer.zero_grad()  # clear earlier gradients
            output0 = model(imgL)
            loss = cross_entropy2d(output0, label).to(device)

            loss.backward()  # backpropagation loss
            optimizer.step()  # optimizer parameter update
            time_meter.update(time.time() - start_ts)

            if (i + 1) % print_iter == 0:
                fmt_str = "Iter [{:d}/{:d}]  Loss: {:.4f}  Time/Image: {:.4f}"
                print_str = fmt_str.format(i + 1, train_iters, loss.item(), time_meter.val / args.batch_size)
                print(print_str)
                logger.info(print_str)
                writer.add_scalar('loss/train_loss', loss.item(), i + 1)
                time_meter.reset()

            if (i + 1) % save_iters == 0:
                savefilename = args.savemodel + 'iter_' + str(i) + '.tar'
                torch.save({
                    'iter': i,
                    'state_dict': model.state_dict(),
                }, savefilename)

            torch.cuda.empty_cache()
            if (i + 1) % val_iters == 0 or (i + 1) == train_iters:
                model.eval()
                with torch.no_grad():
                    for i_val, (leftimgval, labelval) in tqdm(enumerate(valloader)):
                        imgL = Variable(leftimgval.cuda())
                        output0val = model(imgL) # [batch_size, n_classes, height, width]

                        # segmentation loss and per-pixel predictions
                        lossval = cross_entropy2d(output0val, labelval.cuda())  # mean pixelwise loss over the batch
                        pred = output0val.data.max(1)[1].cpu().numpy()  # [batch_size, height, width]
                        gt = labelval.data.cpu().numpy()  # [batch_size, height, width]
                        running_metrics_val.update(gt=gt, pred=pred)
                        val_loss_meter.update(lossval.item())

                writer.add_scalar('loss/val_loss', val_loss_meter.avg, i + 1)
                logger.info("Iter %d val_loss: %.4f" % (i + 1, val_loss_meter.avg))
                print("Iter %d val_loss: %.4f" % (i + 1, val_loss_meter.avg))

                # output scores
                score, class_iou = running_metrics_val.get_scores()
                for k, v in score.items():
                    print(k, v)
                    sys.stdout.flush()
                    logger.info('{}: {}'.format(k, v))
                    writer.add_scalar('val_metrics/{}'.format(k), v, i + 1)
                for k, v in class_iou.items():
                    logger.info('{}: {}'.format(k, v))
                    writer.add_scalar('val_metrics/cls_{}'.format(k), v, i + 1)
                torch.cuda.empty_cache()

                # SAVE best model for segmentation
                save_model = False
                if score["Mean IoU : \t"] >= best_iou:
                    best_iou = score["Mean IoU : \t"]
                    save_model = True
                if save_model:
                    # SAVE
                    savefilename = args.savemodel + 'model_best_iou.tar'
                    torch.save({
                        'iter': i,
                        'state_dict': model.state_dict(),
                        "best_iou": best_iou,
                    }, savefilename)
                val_loss_meter.reset()
                running_metrics_val.reset()

            if (i + 1) == train_iters:
                flag = False
                break
            i += 1
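
Note: the checkpoints saved above contain only iter, state_dict, and best_iou. A minimal sketch of resuming from the best-IoU checkpoint, assuming the model is constructed the same way as when it was saved (the path simply mirrors the naming scheme used in the loop above):

import torch

checkpoint = torch.load(args.savemodel + 'model_best_iou.tar',
                        map_location='cuda' if torch.cuda.is_available() else 'cpu')
model.load_state_dict(checkpoint['state_dict'])
start_iter = checkpoint['iter'] + 1            # continue counting from the saved iteration
best_iou = checkpoint.get('best_iou', -100.0)  # matches the initial value in main()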