Code example #1
# Assumption: helpers such as loadjson, savejson, pathjoin, dirname, basename and
# tree come from boxx (as in the other snippets here); defaultdict is from
# collections and evaluateByJson is a project-level function.
def evaluateByJsp(resJsp,
                  gtJsp,
                  log=True,
                  method=None,
                  levels=['averaged', 'easy', 'medium', 'hard']):
    if method is None:
        # Infer the method name from the result json's directory layout
        method = basename(dirname(dirname(dirname(resJsp))))
        # method = basename(dirname(resJsp))  # alternative: use the parent dir
    resTable = defaultdict(lambda: {})

    resJs = loadjson(resJsp)
    gtJs = loadjson(gtJsp)

    if 'averaged' in levels:
        level = 'averaged'
        row = evaluateByJson(
            resJs,
            gtJs,
        )

        row['method'] = method
        row['level'] = level
        resTable[level] = dict(row)
        tree - row  # boxx debug helper: pretty-print the row structure
    for level in filter(lambda x: x in levels, ['easy', 'medium', 'hard']):
        # Restrict the ground truth to images of the current difficulty level
        coco = loadjson(gtJsp)
        coco['images'] = [d for d in coco['images'] if d['level'] == level]
        imgIds = [d['id'] for d in coco['images']]
        coco['annotations'] = [
            bb for bb in coco['annotations'] if bb['image_id'] in imgIds
        ]
        resJs = loadjson(resJsp)
        row = evaluateByJson(
            resJs,
            coco,
        )

        row['method'] = method
        row['level'] = level
        resTable[level] = dict(row)
        tree - row  # boxx debug helper: pretty-print the row structure

    # Save the aggregated table next to the result json
    resdir = dirname(resJsp)
    savejson(resTable, pathjoin(resdir, 'resTable.json'))
    return resTable
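
A minimal usage sketch; the paths and the method name below are illustrative, not taken from the project:

# Hypothetical paths: a COCO-style result json plus a ground truth json whose
# images carry a 'level' field.
resTable = evaluateByJsp(
    'results/my_method/val/json/bbox_results.json',
    'data/annotations/instances_val.json',
    method='my_method',
    levels=['averaged', 'easy', 'hard'],
)
print(resTable['averaged'])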
Code example #2
def addPathToSys(_file_, pathToJoin='.'):
    '''
    Append abspath(join(dirname(_file_), pathToJoin)) to sys.path.

    Parameters
    ----------
    _file_ : str
        Path of a .py file, i.e. its __file__ variable.
    pathToJoin : str, default '.'
        Relative path to join onto the file's directory.
    '''
    import sys
    from os.path import abspath, join, dirname
    apath = abspath(join(dirname(abspath(_file_)), pathToJoin))
    if apath not in sys.path:
        sys.path.append(apath)
    return apath
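
Usage sketch; the '../lib' layout is hypothetical:

# Make a sibling 'lib' directory importable from the current script.
libdir = addPathToSys(__file__, '../lib')
print(libdir)  # the absolute path that was appended to sys.path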
Code example #3
def importByPath(pyPath):
    '''
    Import a `.py` file given its file path and return it as a module.

    >>> module = importByPath('far/away.py')
    '''
    from boxx import os, dirname, sys, withfun
    pyFile = pyPath
    assert os.path.isfile(pyFile) or os.path.isdir(pyFile), pyFile
    dirr = dirname(pyFile)
    import importlib

    def exitFun():
        assert sys.path.pop(0) == dirr

    # boxx.withfun runs the first callable on enter and exitFun on exit, so the
    # file's directory is only on sys.path while the import runs.
    with withfun(lambda: sys.path.insert(0, dirr), exitFun):
        module = importlib.import_module(
            os.path.basename(pyFile).replace('.py', ''))
        return module
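
Usage sketch; the path is hypothetical:

# Import a standalone script as a module without installing it.
utils = importByPath('scripts/my_utils.py')
print(utils.__name__)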
Code example #4
def main():
    args = parse_args()

    cf.args = args  # cf: shared config namespace (from boxx), used to pass args around

    reset_config(config, args)

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')

    tb_log_dir = pathjoin(
        dirname(tb_log_dir), 'w%s,m%s,rs%s,t%s_' %
        (args.pointMaxW, args.probMargin, ''.join(map(str, args.rs)), args.t) +
        basename(tb_log_dir))

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    model = eval('models.' + config.MODEL.NAME + '.get_pose_net')(
        config, is_train=True)

    # copy model file
    this_dir = os.path.dirname(__file__)
    shutil.copy2(
        os.path.join(this_dir, '../lib/models', config.MODEL.NAME + '.py'),
        final_output_dir)

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    dump_input = torch.rand(
        (config.TRAIN.BATCH_SIZE, 3, config.MODEL.IMAGE_SIZE[1],
         config.MODEL.IMAGE_SIZE[0]))
    #writer_dict['writer'].add_graph(model, (dump_input, ), verbose=False)

    gpus = [int(i) for i in config.GPUS.split(',')]
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()

    # define loss function (criterion) and optimizer
    #    criterion = JointsMSELoss(
    #        use_target_weight=config.LOSS.USE_TARGET_WEIGHT
    #    ).cuda()

    if config.TRAIN.CRITERION == 'msssm_mean':
        criterion = MultiScaleSpatialSoftmax(log_freq=60 * 10,
                                             cyc_rs=args.rs,
                                             poolings=['avg', 'max'][:],
                                             pointMaxW=args.pointMaxW,
                                             probMargin=args.probMargin,
                                             temper=args.t)
        # p[1, 4, 10]* m[0, .5, .8]
#        criterion = MultiScaleSpatialSoftMax( poolings=['avg', 'max'], pointMaxW=1)
#        criterion = MultiScaleSpatialSoftMax(cyc_rs=[8, 4, 2, ], pointMaxW=1)
    elif config.TRAIN.CRITERION == 'ssm_mean':
        criterion = SpatialSoftmax()
    else:
        raise ValueError('Unknown TRAIN.CRITERION: %s' % config.TRAIN.CRITERION)
    # criterion = torch.nn.DataParallel(criterion, device_ids=gpus).cuda()

    # cf.debugPoinMax = 30
    cf.debugPoinMax = False

    if cf.debugPoinMax:
        criterion = MultiScaleSpatialSoftmax(
            log_freq=30,
            cyc_rs=[],
            poolings=['avg', 'max'][:],
            pointMaxW=args.pointMaxW,
        )

    optimizer = get_optimizer(config, model)

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR)

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = eval('dataset.' + config.DATASET.DATASET)(
        config, config.DATASET.ROOT, config.DATASET.TRAIN_SET, True,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    valid_dataset = eval('dataset.' + config.DATASET.DATASET)(
        config, config.DATASET.ROOT, config.DATASET.TEST_SET, False,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE * len(gpus),
        shuffle=config.TRAIN.SHUFFLE,
        num_workers=config.WORKERS,
        pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    best_perf = 0.0
    best_model = False
    for epoch in range(config.TRAIN.BEGIN_EPOCH, config.TRAIN.END_EPOCH):
        # NOTE: since PyTorch 1.1, lr_scheduler.step() is expected to run after
        # the optimizer steps (i.e. after train()); kept here as in the original.
        lr_scheduler.step()

        # train for one epoch
        train(config, train_loader, model, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)

        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, valid_dataset, model,
                                  criterion, final_output_dir, tb_log_dir,
                                  writer_dict)

        if perf_indicator > best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False

        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': get_model_name(config),
                'state_dict': model.state_dict(),
                'perf': perf_indicator,
                'optimizer': optimizer.state_dict(),
            }, best_model, final_output_dir)

    final_model_state_file = os.path.join(final_output_dir,
                                          'final_state.pth.tar')
    logger.info(
        'saving final model state to {}'.format(final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
    print(args)
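
The snippet does not show an entry point; the usual guard would be:

if __name__ == '__main__':
    main()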
Code example #5
File: visDir.py  Project: DIYer22/maskrcnn-benchmark
def visBboxList(rgb,
                bboxList,
                path='/tmp/visCanvas/tmp.pdf',
                classNames=None,
                box_alpha=0.8,
                show_class=True,
                thresh=0.1,
                show_mask=None,
                pltshow=False):

    if classNames is None:
        classNames = {i: "%d-class" % i for i in range(1111)}
    classn = len(classNames)
    dataset = dicto(classes=classNames)
    cls_segms = [[] for _ in range(classn)]
    cls_boxes = [np.zeros((0, 5), np.float32) for _ in range(classn)]

    #cls_segms = None
    #cls_boxes = [np.zeros((0,5), np.float32) for _ in range(classn)]
    bboxnps = bboxList.bbox
    extraFields = {
        k: v.cpu().numpy()
        for k, v in bboxList.extra_fields.items()
    }
    if show_mask is None:
        if 'mask' in extraFields:
            show_mask = True

    for ind, bboxnp in enumerate(bboxnps):
        # g()  # boxx debug helper (disabled)
        other = dicto({k: v[ind] for k, v in extraFields.items()})
        if other.scores < thresh:
            continue
        c = other.labels
        if show_mask:
            rle = mask2rle(other.mask)
            cls_segms[c].append(rle)
        if bboxList.mode == 'xyxy':
            cls_boxes[c] = np.append(cls_boxes[c],
                                     [list(bboxnp) + [other.scores]], 0)

    cls_keyps = None
    if not show_mask:
        cls_segms = None

    # g()  # boxx debug helper (disabled)
    outputDir, name = dirname(path), filename(path)
    vis_one_image(
        rgb,  # RGB image for visualization
        name,
        outputDir,
        cls_boxes,
        cls_segms,
        cls_keyps,
        dataset=dataset,
        box_alpha=box_alpha,
        show_class=show_class,
        thresh=thresh,
        kp_thresh=2,
        pltshow=pltshow,
    )
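
Usage sketch; the variable names are illustrative. `predictions` is assumed to be a maskrcnn-benchmark BoxList carrying 'scores' and 'labels' extra fields (and 'mask' if masks should be drawn):

# image: HxWx3 RGB numpy array; predictions: BoxList from the model output
# (both names are hypothetical).
visBboxList(image, predictions,
            path='/tmp/visCanvas/demo.pdf',
            thresh=0.5,
            pltshow=True)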