示例#1
0
    def validate(epoch):
        """Run one validation epoch over ``val_loader`` and checkpoint.

        Computes top-1/top-5 accuracy across all distributed workers, prints
        the result on rank 0, and (unless ``args.eval``) saves a checkpoint,
        flagging it as best when top-1 accuracy improves.

        Args:
            epoch: current epoch index, stored verbatim in the checkpoint.

        NOTE(review): relies on outer-scope names (model, val_loader, args,
        optimizer, amp, encoding, torch_dist_sum, accuracy, AverageMeter)
        and on module-level globals best_pred / acclist_train / acclist_val.
        """
        model.eval()
        top1 = AverageMeter()
        top5 = AverageMeter()
        global best_pred, acclist_train, acclist_val
        is_best = False
        for batch_idx, (data, target) in enumerate(val_loader):
            data, target = data.cuda(args.gpu), target.cuda(args.gpu)
            with torch.no_grad():
                output = model(data)
                acc1, acc5 = accuracy(output, target, topk=(1, 5))
                top1.update(acc1[0], data.size(0))
                top5.update(acc5[0], data.size(0))

        # Aggregate the per-worker sums/counts across the process group.
        sum1, cnt1, sum5, cnt5 = torch_dist_sum(args.gpu, top1.sum, top1.count,
                                                top5.sum, top5.count)

        # Rank 0 reduces and reports; previously this block was duplicated
        # in a separate `args.eval` branch.
        if args.gpu == 0:
            top1_acc = sum(sum1) / sum(cnt1)
            top5_acc = sum(sum5) / sum(cnt5)
            print('Validation: Top1: %.3f | Top5: %.3f' % (top1_acc, top5_acc))

        # Eval-only mode: report (above) but never write a checkpoint.
        if args.eval:
            return

        if args.gpu == 0:
            # Record accuracy history and save a checkpoint (best-tagged
            # when top-1 improved on the running best).
            acclist_val += [top1_acc]
            if top1_acc > best_pred:
                best_pred = top1_acc
                is_best = True
            state_dict = {
                'epoch': epoch,
                'state_dict': model.module.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_pred': best_pred,
                'acclist_train': acclist_train,
                'acclist_val': acclist_val,
            }
            if args.amp:
                # Mixed-precision scaler state must be restored on resume.
                state_dict['amp'] = amp.state_dict()
            encoding.utils.save_checkpoint(state_dict,
                                           args=args,
                                           is_best=is_best)
示例#2
0
    def validation(epoch):
        """Fast single-crop validation pass for the current ``model``.

        Streams ``valloader``, accumulates segmentation statistics in
        ``metric``, prints pixAcc/mIoU every 100 batches and at the end,
        and (unless ``args.eval``) saves a checkpoint, best-tagged when
        (pixAcc + mIoU) / 2 improves on the running best.

        Args:
            epoch: current epoch index; checkpoint stores ``epoch + 1``.

        NOTE(review): relies on outer-scope names (model, valloader, args,
        optimizer, metric, utils) and the module-level global best_pred.
        """
        global best_pred
        is_best = False
        model.eval()
        metric.reset()

        def _reduce_metrics():
            # Sum the raw counts across all workers, then derive the
            # global pixAcc / mIoU. (Was duplicated inline, with the
            # variable misspelled as `all_metircs`.)
            all_metrics = metric.get_all()
            all_metrics = utils.torch_dist_sum(args.gpu, *all_metrics)
            return utils.get_pixacc_miou(*all_metrics)

        for i, (image, target) in enumerate(valloader):
            with torch.no_grad():
                pred = model(image)[0]
                target = target.cuda(args.gpu)
                metric.update(target, pred)

            # Periodic progress report (also fires on the first batch).
            if i % 100 == 0:
                pixAcc, mIoU = _reduce_metrics()
                if args.gpu == 0:
                    print('pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))

        pixAcc, mIoU = _reduce_metrics()
        if args.gpu == 0:
            print('pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))
            # Eval-only mode: report but never checkpoint.
            if args.eval:
                return
            new_pred = (pixAcc + mIoU) / 2
            if new_pred > best_pred:
                is_best = True
                best_pred = new_pred
            utils.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.module.state_dict(),
                'optimizer': optimizer.state_dict(),
                'best_pred': best_pred,
            }, args, is_best)
示例#3
0
    def validation(epoch):
        """Fast single-crop validation pass with metric-history plots.

        Streams ``valloader``, accumulates segmentation statistics in
        ``metric``, prints pixAcc/mIoU every 100 batches and at the end,
        appends the results to the plotted histories, and (unless
        ``args.eval``) saves a per-epoch checkpoint, best-tagged when
        (pixAcc + mIoU) / 2 improves on the running best.

        Args:
            epoch: current epoch index; checkpoint stores ``epoch + 1``.

        NOTE(review): relies on outer-scope names (model, valloader, args,
        optimizer, metric, utils, plt) and on module-level state best_pred,
        p_m, new_preds — presumably lists accumulated across epochs; verify
        against the caller.
        """
        global best_pred
        is_best = False
        model.eval()
        metric.reset()

        def _reduce_metrics():
            # Sum the raw counts across all workers, then derive the
            # global pixAcc / mIoU. (Was duplicated inline, with the
            # variable misspelled as `all_metircs`.)
            all_metrics = metric.get_all()
            all_metrics = utils.torch_dist_sum(args.gpu, *all_metrics)
            return utils.get_pixacc_miou(*all_metrics)

        def _save_plot(series, label, stem, legend=None):
            # Plot the accumulated history and write PDF + SVG copies
            # under ./loss_fig/ (directory is assumed to exist).
            plt.plot(series)
            plt.xlabel('10 Epoch')
            plt.ylabel(label)
            plt.title(label)
            plt.grid()
            if legend is not None:
                plt.legend(legend)
            plt.savefig('./loss_fig/%s.pdf' % stem)
            plt.savefig('./loss_fig/%s.svg' % stem)
            plt.close()

        for i, (image, target) in enumerate(valloader):
            with torch.no_grad():
                pred = model(image)[0]
                target = target.cuda(args.gpu)
                metric.update(target, pred)

            # Periodic progress report (also fires on the first batch).
            if i % 100 == 0:
                pixAcc, mIoU = _reduce_metrics()
                if args.gpu == 0:
                    print('pixAcc: %.3f, mIoU1: %.3f' % (pixAcc, mIoU))

        pixAcc, mIoU = _reduce_metrics()
        if args.gpu == 0:
            print('pixAcc: %.3f, mIoU2: %.3f' % (pixAcc, mIoU))

            # Record and plot the (pixAcc, mIoU) history.
            p_m.append((pixAcc, mIoU))
            _save_plot(p_m, 'pixAcc, mIoU', 'pixAcc_mIoU',
                       legend=('pixAcc', 'mIoU'))

            # Eval-only mode: report/plot but never checkpoint.
            if args.eval:
                return
            new_pred = (pixAcc + mIoU) / 2
            new_preds.append(new_pred)
            _save_plot(new_preds, 'new_predication', 'new_predication')

            if new_pred > best_pred:
                is_best = True
                best_pred = new_pred
            utils.save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.module.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'best_pred': best_pred,
                },
                args,
                is_best,
                filename='checkpoint_train_{}.pth.tar'.format(epoch + 1))