Пример #1
0
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch over `train_loader`.

    Trains model `m` with `criterion`/`optimizer`, applies optional mixed
    precision and an optional BatchNorm sparsity penalty, and logs running
    loss/accuracy to tensorboard via `writer`.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    loss_meter, acc_meter = DataLogger(), DataLogger()
    m.train()

    progress = tqdm(train_loader)
    for inps, labels, setMask, img_info in progress:
        # Move the batch to the GPU when one is in use; inputs need
        # gradients for backprop either way.
        if device != "cpu":
            inps, labels, setMask = (inps.cuda(), labels.cuda(),
                                     setMask.cuda())
        inps = inps.requires_grad_()
        out = m(inps)

        # Masked loss: only outputs selected by setMask contribute.
        loss = criterion(out.mul(setMask), labels)
        batch_acc = accuracy(out.data.mul(setMask), labels.data,
                             train_loader.dataset)

        batch = inps.size(0)
        acc_meter.update(batch_acc[0], batch)
        loss_meter.update(loss.item(), batch)

        optimizer.zero_grad()

        # Mixed-precision path scales the loss before backprop (apex amp).
        if mix_precision:
            with amp.scale_loss(loss, optimizer) as scaled:
                scaled.backward()
        else:
            loss.backward()

        # Optional L1-style sparsity gradient on BatchNorm scale factors.
        if config.sparse:
            for mod in m.modules():
                if isinstance(mod, nn.BatchNorm2d):
                    mod.weight.grad.data.add_(
                        config.sparse_s * torch.sign(mod.weight.data))

        optimizer.step()
        opt.trainIters += 1

        # Tensorboard
        writer.add_scalar('Train/Loss', loss_meter.avg, opt.trainIters)
        writer.add_scalar('Train/Acc', acc_meter.avg, opt.trainIters)

        # TQDM
        progress.set_description(
            'loss: {loss:.8f} | acc: {acc:.2f}'.format(
                loss=loss_meter.avg, acc=acc_meter.avg * 100))

    progress.close()

    return loss_meter.avg, acc_meter.avg
Пример #2
0
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch over `train_loader` (CUDA-only path).

    Trains `m` with `criterion`/`optimizer` and logs running averages to
    tensorboard via `writer`.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    # Running-average meters for loss and accuracy.
    lossLogger = DataLogger()
    accLogger = DataLogger()

    m.train()

    train_loader_desc = tqdm(train_loader)

    for i, (inps, labels, setMask, imgset) in enumerate(train_loader_desc):
        # Move the batch to GPU; inputs require grad for backprop.
        inps = inps.cuda().requires_grad_()
        labels = labels.cuda()
        setMask = setMask.cuda()
        out = m(inps)

        # Masked loss: only outputs selected by setMask contribute.
        loss = criterion(out.mul(setMask), labels)

        # Batch accuracy on detached tensors.
        acc = accuracy(out.data.mul(setMask), labels.data, train_loader.dataset)

        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))

        # Standard backprop step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        opt.trainIters += 1
        # Write running averages to tensorboard.
        writer.add_scalar(
            'Train/Loss', lossLogger.avg, opt.trainIters
        )
        # BUG FIX: 'Train/Acc' previously logged lossLogger.avg.
        writer.add_scalar(
            'Train/Acc', accLogger.avg, opt.trainIters
        )

        # TQDM
        train_loader_desc.set_description(
            'loss: {loss:.8f} | acc: {acc:.2f}'.format(
                loss=lossLogger.avg,
                acc=accLogger.avg * 100
            )
        )

    train_loader_desc.close()

    return lossLogger.avg, accLogger.avg
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch, skipping batches that fail to load.

    Trains `m` with `criterion`/`optimizer`; the loss here is unmasked
    (no setMask applied, unlike the sibling train loops).

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    lossLogger = DataLogger()
    accLogger = DataLogger()
    m.train()

    total = len(train_loader)
    total_desc = tqdm(range(total))
    # BUG FIX: keep a handle on the dataset BEFORE rebinding train_loader
    # to its iterator — the iterator object has no public `.dataset`.
    dataset = train_loader.dataset
    train_loader = iter(train_loader)
    for ii in total_desc:
        try:
            # BUG FIX: `.next()` is Python-2 only; use the built-in next().
            inps, labels, setMask, imgset = next(train_loader)
        except Exception as e:
            # Best-effort: log and skip a batch that failed to load.
            # (Was `except BaseException`, which also swallowed Ctrl-C.)
            print('Error:', ii, e)
            continue

        inps = inps.cuda().requires_grad_()
        labels = labels.cuda()
        out = m(inps)

        loss = criterion(out, labels)

        acc = accuracy(out.data, labels.data, dataset)

        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        opt.trainIters += 1
        # Tensorboard
        writer.add_scalar(
            'Train/Loss', lossLogger.avg, opt.trainIters)
        writer.add_scalar(
            'Train/Acc', accLogger.avg, opt.trainIters)

        # TQDM
        total_desc.set_description(
            'loss: {loss:.8f} | acc: {acc:.2f}'.format(
                loss=lossLogger.avg,
                acc=accLogger.avg * 100)
        )

    total_desc.close()

    return lossLogger.avg, accLogger.avg
Пример #4
0
def valid(val_loader, m, criterion, optimizer, writer):
    """Evaluate `m` on `val_loader` for one epoch with flip augmentation.

    Predictions are averaged with a horizontally-flipped forward pass.
    `optimizer` is unused but kept for signature parity with `train`.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    kp_rendered = False
    loss_meter, acc_meter = DataLogger(), DataLogger()
    m.eval()

    progress = tqdm(val_loader)
    for inps, labels, setMask, img_info in progress:
        if device != "cpu":
            inps, labels, setMask = inps.cuda(), labels.cuda(), setMask.cuda()

        with torch.no_grad():
            out = m(inps)
            # Loss uses the raw (un-averaged) forward pass.
            loss = criterion(out.mul(setMask), labels)

            # Average with the flipped pass for a better accuracy estimate.
            mirrored = m(flip(inps))
            mirrored = flip(shuffleLR(mirrored, val_loader.dataset))
            out = (mirrored + out) / 2

        acc = accuracy(out.mul(setMask), labels, val_loader.dataset)

        # Render keypoints once per epoch (result currently unused).
        if not kp_rendered:
            kp_rendered = True
            kps_img = draw_kps(out)
            # writer.add

        loss_meter.update(loss.item(), inps.size(0))
        acc_meter.update(acc[0], inps.size(0))

        opt.valIters += 1

        # Tensorboard
        writer.add_scalar('Valid/Loss', loss_meter.avg, opt.valIters)
        writer.add_scalar('Valid/Acc', acc_meter.avg, opt.valIters)

        progress.set_description(
            'loss: {loss:.8f} | acc: {acc:.2f}'.format(
                loss=loss_meter.avg, acc=acc_meter.avg * 100))

    progress.close()

    return loss_meter.avg, acc_meter.avg
def valid(val_loader, m, criterion, optimizer, writer):
    """Evaluate `m` on `val_loader` (CUDA-only) with flip augmentation.

    Predictions are averaged with a horizontally-flipped forward pass via
    `flip_v`/`shuffleLR_v`. `optimizer` is unused but kept for signature
    parity with `train`.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    loss_meter, acc_meter = DataLogger(), DataLogger()
    m.eval()

    for inps, labels, setMask, imgset in val_loader:
        inps, labels, setMask = inps.cuda(), labels.cuda(), setMask.cuda()

        with torch.no_grad():
            out = m(inps)
            # Loss uses the raw (un-averaged) forward pass.
            loss = criterion(out.mul(setMask), labels)

            # Average with predictions from a horizontally-flipped pass.
            mirrored = m(flip_v(inps, cuda=True))
            mirrored = flip_v(
                shuffleLR_v(mirrored, val_loader.dataset, cuda=True),
                cuda=True)
            out = (mirrored + out) / 2

        acc = accuracy(out.mul(setMask), labels, val_loader.dataset)

        loss_meter.update(loss.item(), inps.size(0))
        acc_meter.update(acc[0], inps.size(0))

        opt.valIters += 1

        # Tensorboard
        writer.add_scalar('Valid/Loss', loss_meter.avg, opt.valIters)
        writer.add_scalar('Valid/Acc', acc_meter.avg, opt.valIters)

    return loss_meter.avg, acc_meter.avg
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch; also (re)writes the acc_loss.csv header.

    Trains `m` with `criterion`/`optimizer` and logs running averages to
    tensorboard via `writer`.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    lossLogger = DataLogger()
    accLogger = DataLogger()
    # NOTE(review): "w+" truncates the CSV on every call, so rows from
    # earlier epochs are discarded — confirm that is intended. The context
    # manager replaces the manual open/write/close sequence and guarantees
    # the handle is closed even if write() raises.
    with open("acc_loss.csv", "w+") as f:
        f.write('epoch,acc,loss,eval_acc\n')
    m.train()

    train_loader_desc = tqdm(train_loader)

    for i, (inps, labels, setMask, imgset) in enumerate(train_loader_desc):
        # Batch to GPU; inputs require grad for backprop.
        inps = inps.cuda().requires_grad_()
        labels = labels.cuda()
        setMask = setMask.cuda()
        out = m(inps)
        # Masked loss: only outputs selected by setMask contribute.
        loss = criterion(out.mul(setMask), labels)

        acc = accuracy(out.data.mul(setMask), labels.data,
                       train_loader.dataset)

        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        opt.trainIters += 1
        # Tensorboard
        writer.add_scalar('Train/Loss', lossLogger.avg, opt.trainIters)
        writer.add_scalar('Train/Acc', accLogger.avg, opt.trainIters)

    train_loader_desc.close()

    return lossLogger.avg, accLogger.avg
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch without a tqdm progress bar.

    Trains `m` with `criterion`/`optimizer` and logs running averages to
    tensorboard via `writer`.

    Returns:
        (average loss, average accuracy) over the epoch.
    """
    loss_meter = DataLogger()
    acc_meter = DataLogger()
    m.train()

    for inps, labels, setMask, imgset in train_loader:
        # Per the original annotations, all four tensors share the heatmap
        # layout, e.g. [32, 17, 80, 64] — TODO confirm against the dataset.
        inps = inps.cuda().requires_grad_()
        labels = labels.cuda()
        setMask = setMask.cuda()
        out = m(inps)

        # Masked loss: only outputs selected by setMask contribute.
        loss = criterion(out.mul(setMask), labels)
        acc = accuracy(out.data.mul(setMask), labels.data,
                       train_loader.dataset)

        batch = inps.size(0)
        acc_meter.update(acc[0], batch)
        loss_meter.update(loss.item(), batch)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        opt.trainIters += 1
        # Tensorboard
        writer.add_scalar('Train/Loss', loss_meter.avg, opt.trainIters)
        writer.add_scalar('Train/Acc', acc_meter.avg, opt.trainIters)

    return loss_meter.avg, acc_meter.avg
Пример #8
0
def train(train_loader, m, criterion, optimizer, writer):
    """Run one training epoch with per-keypoint metric tracking.

    Trains `m` with a per-group weighted loss (driven by `loss_params`),
    optionally adds a BatchNorm sparsity gradient, and logs
    loss/acc/PCKh/dist/AUC/PR to tensorboard via `writer`.

    Returns:
        (avg_loss, avg_acc, avg_dist, AUC, PR,
         per-keypoint acc list, dist list, AUC list, PR list)
    """
    # Epoch-wide aggregate meters.
    accLogger, distLogger, lossLogger, curveLogger = DataLogger(), DataLogger(
    ), DataLogger(), CurveLogger()
    pckhLogger = DataLogger()
    # One meter per keypoint; PCKh is tracked for 12 entries only.
    pts_acc_Loggers = {i: DataLogger() for i in range(opt.kps)}
    pts_dist_Loggers = {i: DataLogger() for i in range(opt.kps)}
    pts_curve_Loggers = {i: CurveLogger() for i in range(opt.kps)}
    pts_pckh_Loggers = {i: DataLogger() for i in range(12)}

    m.train()

    train_loader_desc = tqdm(train_loader)
    # Sparsity coefficient for the BatchNorm penalty, scheduled per epoch.
    s = get_sparse_value()
    print("sparse value is {} in epoch {}".format(s, opt.epoch))
    # print("Training")

    for i, (inps, labels, setMask, img_info) in enumerate(train_loader_desc):
        if device != "cpu":
            inps = inps.cuda().requires_grad_()
            labels = labels.cuda()
            setMask = setMask.cuda()
        else:
            inps = inps.requires_grad_()
        out = m(inps)

        # loss = criterion(out.mul(setMask), labels)
        # Weighted sum of the criterion over keypoint-channel groups:
        # loss_params maps weight -> list of channel indices.
        loss = torch.zeros(1).cuda()
        for cons, idx_ls in loss_params.items():
            loss += cons * criterion(out[:, idx_ls, :, :],
                                     labels[:, idx_ls, :, :])

        # for idx, logger in pts_loss_Loggers.items():
        #     logger.update(criterion(out.mul(setMask)[:, [idx], :, :], labels[:, [idx], :, :]), inps.size(0))
        # Index 0 of acc/dist/pckh is the overall value; entries k+1 are
        # per-keypoint values (consumed in the per-point updates below).
        acc, dist, exists, pckh, (maxval, gt) = cal_accuracy(
            out.data.mul(setMask), labels.data, train_loader.dataset.accIdxs)
        # acc, exists = accuracy(out.data.mul(setMask), labels.data, train_loader.dataset, img_info[-1])

        optimizer.zero_grad()

        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))
        distLogger.update(dist[0], inps.size(0))
        pckhLogger.update(pckh[0], inps.size(0))
        # Flatten confidences and ground truth for the AUC / PR curves.
        curveLogger.update(
            maxval.reshape(1, -1).squeeze(),
            gt.reshape(1, -1).squeeze())
        ave_auc = curveLogger.cal_AUC()
        pr_area = curveLogger.cal_PR()

        # Per-keypoint meters; a keypoint only counts when it exists
        # in the batch (exists[k] > 0).
        for k, v in pts_acc_Loggers.items():
            pts_curve_Loggers[k].update(maxval[k], gt[k])
            if exists[k] > 0:
                pts_acc_Loggers[k].update(acc[k + 1], exists[k])
                pts_dist_Loggers[k].update(dist[k + 1], exists[k])
        # PCKh weights come from the last 12 "exists" entries —
        # presumably the body (non-face) keypoints; TODO confirm.
        pckh_exist = exists[-12:]
        for k, v in pts_pckh_Loggers.items():
            if exists[k] > 0:
                pts_pckh_Loggers[k].update(pckh[k + 1], pckh_exist[k])

        # Mixed-precision path scales the loss before backprop (apex amp).
        if mix_precision:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()

        # When nothing is frozen, add an L1-style sparsity gradient to each
        # BatchNorm scale factor (channel-pruning preparation).
        if opt.freeze == 0:
            for mod in m.modules():
                if isinstance(mod, nn.BatchNorm2d):
                    mod.weight.grad.data.add_(s * torch.sign(mod.weight.data))

        optimizer.step()
        opt.trainIters += 1
        # Tensorboard
        writer.add_scalar('Train/Loss', lossLogger.avg, opt.trainIters)
        writer.add_scalar('Train/Acc', accLogger.avg, opt.trainIters)
        writer.add_scalar('Train/PCKh', pckhLogger.avg, opt.trainIters)
        writer.add_scalar('Train/Dist', distLogger.avg, opt.trainIters)
        writer.add_scalar('Train/AUC', ave_auc, opt.trainIters)
        writer.add_scalar('Train/PR', pr_area, opt.trainIters)

        # TQDM
        train_loader_desc.set_description(
            'Train: {epoch} | loss: {loss:.8f} | acc: {acc:.2f} | PCKh: {pckh:.4f} | dist: {dist:.4f} | AUC: {AUC:.4f} | PR: {PR:.4f}'
            .format(epoch=opt.epoch,
                    loss=lossLogger.avg,
                    acc=accLogger.avg * 100,
                    pckh=pckhLogger.avg * 100,
                    dist=distLogger.avg,
                    AUC=ave_auc,
                    PR=pr_area))

    # Collapse the per-keypoint meters into plain lists.
    body_part_acc = [Logger.avg for k, Logger in pts_acc_Loggers.items()]
    body_part_dist = [Logger.avg for k, Logger in pts_dist_Loggers.items()]
    body_part_auc = [
        Logger.cal_AUC() for k, Logger in pts_curve_Loggers.items()
    ]
    body_part_pr = [Logger.cal_PR() for k, Logger in pts_curve_Loggers.items()]
    # NOTE(review): body_part_pckh is computed but never returned.
    body_part_pckh = [Logger.avg for k, Logger in pts_pckh_Loggers.items()]
    train_loader_desc.close()

    return lossLogger.avg, accLogger.avg, distLogger.avg, curveLogger.cal_AUC(), curveLogger.cal_PR(), \
           body_part_acc, body_part_dist, body_part_auc, body_part_pr
Пример #9
0
def valid(val_loader, m, criterion, writer):
    """Evaluate `m` on `val_loader` with per-keypoint metric tracking.

    Runs one validation pass, optionally renders/logs a keypoint preview
    image and heatmaps once per epoch, and logs loss/acc/dist/AUC/PR to
    tensorboard via `writer`.

    Returns:
        (avg_loss, avg_acc, avg_dist, AUC, PR,
         per-keypoint acc list, dist list, AUC list, PR list)
    """
    # Draw flags ensure the preview image/heatmap is emitted only once.
    drawn_kp, drawn_hm = False, False
    accLogger, distLogger, lossLogger, curveLogger = DataLogger(), DataLogger(
    ), DataLogger(), CurveLogger()
    pckhLogger = DataLogger()
    # One meter per keypoint; PCKh is tracked for 12 entries only.
    pts_acc_Loggers = {i: DataLogger() for i in range(opt.kps)}
    pts_dist_Loggers = {i: DataLogger() for i in range(opt.kps)}
    pts_curve_Loggers = {i: CurveLogger() for i in range(opt.kps)}
    pts_pckh_Loggers = {i: DataLogger() for i in range(12)}
    m.eval()

    val_loader_desc = tqdm(val_loader)

    for i, (inps, labels, setMask, img_info) in enumerate(val_loader_desc):
        if device != "cpu":
            inps = inps.cuda()
            labels = labels.cuda()
            setMask = setMask.cuda()

        with torch.no_grad():
            out = m(inps)

            # One-shot visualization: either show the prediction in a
            # window (draw_pred_img) or push the rendered image + heatmaps
            # to tensorboard.
            if not drawn_kp:
                try:
                    kps_img, have_kp = draw_kps(out, img_info)
                    drawn_kp = True
                    if draw_pred_img:
                        img = cv2.resize(kps_img, (1080, 720))
                        # Re-arm so every batch is shown interactively.
                        drawn_kp = False
                        cv2.imshow("val_pred", img)
                        cv2.waitKey(0)
                        # a = 1
                        # draw_kps(out, img_info)
                    else:
                        # Re-read the image draw_kps wrote to disk;
                        # [:, :, ::-1] converts BGR -> RGB for tensorboard.
                        writer.add_image(
                            "result of epoch {}".format(opt.epoch),
                            cv2.imread(
                                os.path.join("exp", opt.expFolder, opt.expID,
                                             opt.expID,
                                             "img.jpg"))[:, :, ::-1],
                            dataformats='HWC')

                        hm = draw_hms(out[0])
                        writer.add_image(
                            "result of epoch {} --> heatmap".format(opt.epoch),
                            hm)
                # NOTE(review): bare except silently skips any drawing
                # failure (best-effort visualization only).
                except:
                    pass

            # Masked loss on the raw forward pass.
            loss = criterion(out.mul(setMask), labels)

            # flip_out = m(flip(inps))
            # flip_out = flip(shuffleLR(flip_out, val_loader.dataset))
            #
            # out = (flip_out + out) / 2

        # Index 0 of acc/dist/pckh is the overall value; entries k+1 are
        # per-keypoint values (consumed below).
        acc, dist, exists, pckh, (maxval, gt) = cal_accuracy(
            out.data.mul(setMask), labels.data, val_loader.dataset.accIdxs)
        # acc, exists = accuracy(out.mul(setMask), labels, val_loader.dataset, img_info[-1])

        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))
        distLogger.update(dist[0], inps.size(0))
        pckhLogger.update(pckh[0], inps.size(0))
        # Flatten confidences and ground truth for the AUC / PR curves.
        curveLogger.update(
            maxval.reshape(1, -1).squeeze(),
            gt.reshape(1, -1).squeeze())
        ave_auc = curveLogger.cal_AUC()
        pr_area = curveLogger.cal_PR()

        # Per-keypoint meters; a keypoint only counts when present.
        for k, v in pts_acc_Loggers.items():
            pts_curve_Loggers[k].update(maxval[k], gt[k])
            if exists[k] > 0:
                pts_acc_Loggers[k].update(acc[k + 1], exists[k])
                pts_dist_Loggers[k].update(dist[k + 1], exists[k])
        # PCKh weights come from the last 12 "exists" entries —
        # presumably the body (non-face) keypoints; TODO confirm.
        pckh_exist = exists[-12:]
        for k, v in pts_pckh_Loggers.items():
            if exists[k] > 0:
                pts_pckh_Loggers[k].update(pckh[k + 1], pckh_exist[k])

        opt.valIters += 1

        # Tensorboard
        writer.add_scalar('Valid/Loss', lossLogger.avg, opt.valIters)
        writer.add_scalar('Valid/Acc', accLogger.avg, opt.valIters)
        writer.add_scalar('Valid/Dist', distLogger.avg, opt.valIters)
        writer.add_scalar('Valid/AUC', ave_auc, opt.valIters)
        writer.add_scalar('Valid/PR', pr_area, opt.valIters)

        val_loader_desc.set_description(
            'Valid: {epoch} | loss: {loss:.8f} | acc: {acc:.2f} | PCKh: {pckh:.4f} | dist: {dist:.4f} | AUC: {AUC:.4f} | PR: {PR:.4f}'
            .format(epoch=opt.epoch,
                    loss=lossLogger.avg,
                    acc=accLogger.avg * 100,
                    pckh=pckhLogger.avg * 100,
                    dist=distLogger.avg,
                    AUC=ave_auc,
                    PR=pr_area))

    # Collapse the per-keypoint meters into plain lists.
    body_part_acc = [Logger.avg for k, Logger in pts_acc_Loggers.items()]
    body_part_dist = [Logger.avg for k, Logger in pts_dist_Loggers.items()]
    body_part_auc = [
        Logger.cal_AUC() for k, Logger in pts_curve_Loggers.items()
    ]
    body_part_pr = [Logger.cal_PR() for k, Logger in pts_curve_Loggers.items()]
    # NOTE(review): body_part_pckh is computed but never returned.
    body_part_pckh = [Logger.avg for k, Logger in pts_pckh_Loggers.items()]
    val_loader_desc.close()

    return lossLogger.avg, accLogger.avg, distLogger.avg, curveLogger.cal_AUC(), curveLogger.cal_PR(), \
           body_part_acc, body_part_dist, body_part_auc, body_part_pr
Пример #10
0
def test(loader, m, criterion):
    """Run a test pass over `loader` with per-keypoint metric tracking.

    Like `valid` but without tensorboard logging or PCKh; also tries to
    render keypoints for every batch (best-effort).

    Returns:
        (avg_loss, avg_acc, avg_dist, AUC, PR,
         per-keypoint acc list, dist list, AUC list, PR list,
         per-keypoint threshold list)
    """
    # Aggregate and per-keypoint meters.
    accLogger, distLogger, lossLogger, curveLogger = DataLogger(), DataLogger(
    ), DataLogger(), CurveLogger()
    pts_acc_Loggers = {i: DataLogger() for i in range(opt.kps)}
    pts_dist_Loggers = {i: DataLogger() for i in range(opt.kps)}
    pts_curve_Loggers = {i: CurveLogger() for i in range(opt.kps)}
    m.eval()

    test_loader_desc = tqdm(loader)

    for i, (inps, labels, setMask, img_info) in enumerate(test_loader_desc):
        if device != "cpu":
            inps = inps.cuda()
            labels = labels.cuda()
            setMask = setMask.cuda()

        with torch.no_grad():
            out = m(inps)

            # Best-effort visualization; failures are ignored.
            try:
                draw_kps(out, img_info)
            except:
                pass

            # Masked loss: only outputs selected by setMask contribute.
            loss = criterion(out.mul(setMask), labels)

        # Index 0 of acc/dist is the overall value; entries k+1 are
        # per-keypoint values (consumed below).
        acc, dist, exists, (maxval, gt) = cal_accuracy(out.data.mul(setMask),
                                                       labels.data,
                                                       loader.dataset.accIdxs)

        accLogger.update(acc[0], inps.size(0))
        lossLogger.update(loss.item(), inps.size(0))
        distLogger.update(dist[0], inps.size(0))
        # Flatten confidences and ground truth for the AUC / PR curves.
        curveLogger.update(
            maxval.reshape(1, -1).squeeze(),
            gt.reshape(1, -1).squeeze())
        ave_auc = curveLogger.cal_AUC()
        pr_area = curveLogger.cal_PR()

        # Per-keypoint meters; a keypoint only counts when present.
        for k, v in pts_acc_Loggers.items():
            pts_curve_Loggers[k].update(maxval[k], gt[k])
            if exists[k] > 0:
                pts_acc_Loggers[k].update(acc[k + 1], exists[k])
                pts_dist_Loggers[k].update(dist[k + 1], exists[k])

        # NOTE(review): the format string has no {epoch} placeholder, so
        # the epoch= kwarg is harmlessly ignored by str.format.
        test_loader_desc.set_description(
            'Test: | loss: {loss:.8f} | acc: {acc:.2f} | dist: {dist:.4f} | AUC: {AUC:.4f} | PR: {PR:.4f}'
            .format(epoch=opt.epoch,
                    loss=lossLogger.avg,
                    acc=accLogger.avg * 100,
                    dist=distLogger.avg,
                    AUC=ave_auc,
                    PR=pr_area))

    # Collapse the per-keypoint meters into plain lists.
    body_part_acc = [Logger.avg for k, Logger in pts_acc_Loggers.items()]
    body_part_dist = [Logger.avg for k, Logger in pts_dist_Loggers.items()]
    body_part_auc = [
        Logger.cal_AUC() for k, Logger in pts_curve_Loggers.items()
    ]
    body_part_pr = [Logger.cal_PR() for k, Logger in pts_curve_Loggers.items()]
    # Per-keypoint decision thresholds derived from the PR curves.
    body_part_thresh = [
        Logger.get_thresh() for k, Logger in pts_curve_Loggers.items()
    ]
    test_loader_desc.close()
    print(
        "----------------------------------------------------------------------------------------------------"
    )

    return lossLogger.avg, accLogger.avg, distLogger.avg, curveLogger.cal_AUC(), curveLogger.cal_PR(), \
           body_part_acc, body_part_dist, body_part_auc, body_part_pr, body_part_thresh